author    Linus Torvalds <torvalds@linux-foundation.org>  2021-02-21 20:02:48 +0100
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-02-21 20:02:48 +0100
commit    582cd91f69de8e44857cb610ebca661dac8656b7 (patch)
tree      0d680db02a5c236ee87b408b3f13ce33ebaca907 /drivers
parent    Merge tag 'for-5.12/libata-2021-02-17' of git://git.kernel.dk/linux-block (diff)
parent    mm: simplify swapdev_block (diff)
Merge tag 'for-5.12/block-2021-02-17' of git://git.kernel.dk/linux-block
Pull core block updates from Jens Axboe:
 "Another nice round of removing more code than what is added, mostly
  due to Christoph's relentless pursuit of tech debt removal/cleanups.

  This pull request contains:

   - Two series of BFQ improvements (Paolo, Jan, Jia)
   - Block iov_iter improvements (Pavel)
   - bsg error path fix (Pan)
   - blk-mq scheduler improvements (Jan)
   - -EBUSY discard fix (Jan)
   - bvec allocation improvements (Ming, Christoph)
   - bio allocation and init improvements (Christoph)
   - Store bdev pointer in bio instead of gendisk + partno (Christoph)
   - Block trace point cleanups (Christoph)
   - hard read-only vs read-only split (Christoph)
   - Block based swap cleanups (Christoph)
   - Zoned write granularity support (Damien)
   - Various fixes/tweaks (Chunguang, Guoqing, Lei, Lukas, Huhai)"

* tag 'for-5.12/block-2021-02-17' of git://git.kernel.dk/linux-block: (104 commits)
  mm: simplify swapdev_block
  sd_zbc: clear zone resources for non-zoned case
  block: introduce blk_queue_clear_zone_settings()
  zonefs: use zone write granularity as block size
  block: introduce zone_write_granularity limit
  block: use blk_queue_set_zoned in add_partition()
  nullb: use blk_queue_set_zoned() to setup zoned devices
  nvme: cleanup zone information initialization
  block: document zone_append_max_bytes attribute
  block: use bi_max_vecs to find the bvec pool
  md/raid10: remove dead code in reshape_request
  block: mark the bio as cloned in bio_iov_bvec_set
  block: set BIO_NO_PAGE_REF in bio_iov_bvec_set
  block: remove a layer of indentation in bio_iov_iter_get_pages
  block: turn the nr_iovecs argument to bio_alloc* into an unsigned short
  block: remove the 1 and 4 vec bvec_slabs entries
  block: streamline bvec_alloc
  block: factor out a bvec_alloc_gfp helper
  block: move struct biovec_slab to bio.c
  block: reuse BIO_INLINE_VECS for integrity bvecs
  ...
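Most of the driver churn in the diff below comes from the "store bdev pointer in bio instead of gendisk + partno" item: code that used to dereference bio->bi_disk now reaches the gendisk through bio->bi_bdev->bd_disk. A minimal sketch of that conversion pattern follows; it is not part of the patch, assumes the 5.12-era block layer, and struct my_dev / my_submit_bio are made-up names used only for illustration.

    /*
     * Hypothetical driver fragment showing the bi_bdev access pattern
     * introduced by this series (illustrative only, not from the patch).
     */
    #include <linux/bio.h>
    #include <linux/blkdev.h>

    struct my_dev {
    	int placeholder;	/* driver-private state would live here */
    };

    static blk_qc_t my_submit_bio(struct bio *bio)
    {
    	/*
    	 * Pre-5.12 drivers wrote bio->bi_disk->private_data; with the
    	 * block_device pointer stored in the bio, the gendisk is now
    	 * reached via the owning bdev instead.
    	 */
    	struct my_dev *dev = bio->bi_bdev->bd_disk->private_data;
    	sector_t sector = bio->bi_iter.bi_sector;

    	/* ... process the I/O for dev starting at sector ... */
    	(void)dev;
    	(void)sector;

    	bio_endio(bio);
    	return BLK_QC_T_NONE;
    }

Redirecting a bio to another device still goes through bio_set_dev(), as seen throughout the hunks below, rather than assigning bi_disk/bi_partno by hand.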
Diffstat (limited to 'drivers')
-rw-r--r--drivers/block/brd.c8
-rw-r--r--drivers/block/drbd/drbd_actlog.c2
-rw-r--r--drivers/block/drbd/drbd_bitmap.c2
-rw-r--r--drivers/block/drbd/drbd_int.h6
-rw-r--r--drivers/block/drbd/drbd_main.c13
-rw-r--r--drivers/block/drbd/drbd_req.c7
-rw-r--r--drivers/block/drbd/drbd_req.h12
-rw-r--r--drivers/block/drbd/drbd_worker.c5
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c2
-rw-r--r--drivers/block/null_blk/main.c2
-rw-r--r--drivers/block/null_blk/zoned.c8
-rw-r--r--drivers/block/paride/pd.c2
-rw-r--r--drivers/block/pktcdvd.c6
-rw-r--r--drivers/block/ps3vram.c2
-rw-r--r--drivers/block/rbd.c19
-rw-r--r--drivers/block/rsxx/dev.c2
-rw-r--r--drivers/block/sx8.c4
-rw-r--r--drivers/block/umem.c2
-rw-r--r--drivers/block/virtio_blk.c2
-rw-r--r--drivers/block/zram/zram_drv.c2
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/ide/ide-atapi.c2
-rw-r--r--drivers/ide/ide-cd.c2
-rw-r--r--drivers/ide/ide-cd_ioctl.c2
-rw-r--r--drivers/ide/ide-devsets.c2
-rw-r--r--drivers/ide/ide-disk.c2
-rw-r--r--drivers/ide/ide-ioctls.c4
-rw-r--r--drivers/ide/ide-park.c2
-rw-r--r--drivers/ide/ide-pm.c4
-rw-r--r--drivers/ide/ide-tape.c2
-rw-r--r--drivers/ide/ide-taskfile.c2
-rw-r--r--drivers/lightnvm/pblk-init.c2
-rw-r--r--drivers/md/bcache/debug.c2
-rw-r--r--drivers/md/bcache/request.c39
-rw-r--r--drivers/md/bcache/super.c2
-rw-r--r--drivers/md/dm-bio-record.h9
-rw-r--r--drivers/md/dm-cache-metadata.c2
-rw-r--r--drivers/md/dm-clone-target.c14
-rw-r--r--drivers/md/dm-raid1.c10
-rw-r--r--drivers/md/dm-thin-metadata.c2
-rw-r--r--drivers/md/dm-zoned-metadata.c6
-rw-r--r--drivers/md/dm.c14
-rw-r--r--drivers/md/md-linear.c2
-rw-r--r--drivers/md/md.c73
-rw-r--r--drivers/md/md.h8
-rw-r--r--drivers/md/raid1.c8
-rw-r--r--drivers/md/raid10.c18
-rw-r--r--drivers/md/raid5-ppl.c2
-rw-r--r--drivers/md/raid5.c110
-rw-r--r--drivers/mmc/core/block.c10
-rw-r--r--drivers/nvdimm/blk.c4
-rw-r--r--drivers/nvdimm/btt.c4
-rw-r--r--drivers/nvdimm/pmem.c4
-rw-r--r--drivers/nvme/host/core.c31
-rw-r--r--drivers/nvme/host/lightnvm.c7
-rw-r--r--drivers/nvme/host/multipath.c6
-rw-r--r--drivers/nvme/host/pci.c4
-rw-r--r--drivers/nvme/host/rdma.c2
-rw-r--r--drivers/nvme/host/zns.c11
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c2
-rw-r--r--drivers/nvme/target/passthru.c2
-rw-r--r--drivers/s390/block/dasd.c26
-rw-r--r--drivers/s390/block/dcssblk.c6
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/scsi/scsi_error.c2
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/sd_zbc.c43
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/target/target_core_file.c20
-rw-r--r--drivers/target/target_core_pscsi.c3
71 files changed, 282 insertions, 371 deletions
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index c43a6ab4b1f3..18bf99906662 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -284,15 +284,11 @@ out:
static blk_qc_t brd_submit_bio(struct bio *bio)
{
- struct brd_device *brd = bio->bi_disk->private_data;
+ struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
+ sector_t sector = bio->bi_iter.bi_sector;
struct bio_vec bvec;
- sector_t sector;
struct bvec_iter iter;
- sector = bio->bi_iter.bi_sector;
- if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
- goto io_error;
-
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
int err;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 7227fc7ab8ed..72cf7603d51f 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -138,7 +138,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
op_flags |= REQ_FUA | REQ_PREFLUSH;
op_flags |= REQ_SYNC;
- bio = bio_alloc_drbd(GFP_NOIO);
+ bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
bio_set_dev(bio, bdev->md_bdev);
bio->bi_iter.bi_sector = sector;
err = -EIO;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index df53dca5d02c..c1f816f896a8 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -976,7 +976,7 @@ static void drbd_bm_endio(struct bio *bio)
static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
- struct bio *bio = bio_alloc_drbd(GFP_NOIO);
+ struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
struct drbd_device *device = ctx->device;
struct drbd_bitmap *b = device->bitmap;
struct page *page;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 8f879e5c2f67..02db50d7e4c6 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1422,8 +1422,6 @@ extern mempool_t drbd_md_io_page_pool;
/* We also need to make sure we get a bio
* when we need it for housekeeping purposes */
extern struct bio_set drbd_md_io_bio_set;
-/* to allocate from that set */
-extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
/* And a bio_set for cloning */
extern struct bio_set drbd_io_bio_set;
@@ -1579,8 +1577,8 @@ static inline void drbd_submit_bio_noacct(struct drbd_device *device,
int fault_type, struct bio *bio)
{
__release(local);
- if (!bio->bi_disk) {
- drbd_err(device, "drbd_submit_bio_noacct: bio->bi_disk == NULL\n");
+ if (!bio->bi_bdev) {
+ drbd_err(device, "drbd_submit_bio_noacct: bio->bi_bdev == NULL\n");
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
return;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 1c8c18b2a25f..788dd97e6026 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -138,19 +138,6 @@ static const struct block_device_operations drbd_ops = {
.release = drbd_release,
};
-struct bio *bio_alloc_drbd(gfp_t gfp_mask)
-{
- struct bio *bio;
-
- if (!bioset_initialized(&drbd_md_io_bio_set))
- return bio_alloc(gfp_mask, 1);
-
- bio = bio_alloc_bioset(gfp_mask, 1, &drbd_md_io_bio_set);
- if (!bio)
- return NULL;
- return bio;
-}
-
#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
give tons of false positives. When this is a real functions sparse works.
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 330f851cb8f0..9dbb660a7d7c 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -30,7 +30,10 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio
return NULL;
memset(req, 0, sizeof(*req));
- drbd_req_make_private_bio(req, bio_src);
+ req->private_bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set);
+ req->private_bio->bi_private = req;
+ req->private_bio->bi_end_io = drbd_request_endio;
+
req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
| (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
| (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0)
@@ -1595,7 +1598,7 @@ void do_submit(struct work_struct *ws)
blk_qc_t drbd_submit_bio(struct bio *bio)
{
- struct drbd_device *device = bio->bi_disk->private_data;
+ struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
unsigned long start_jif;
blk_queue_split(&bio);
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 55bb0f8721fa..511f39a08de4 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -256,18 +256,6 @@ enum drbd_req_state_bits {
#define MR_WRITE 1
#define MR_READ 2
-static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
-{
- struct bio *bio;
- bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set);
-
- req->private_bio = bio;
-
- bio->bi_private = req;
- bio->bi_end_io = drbd_request_endio;
- bio->bi_next = NULL;
-}
-
/* Short lived temporary struct on the stack.
* We could squirrel the error to be returned into
* bio->bi_iter.bi_size, or similar. But that would be too ugly. */
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 02044ab7f767..64563bfdf0da 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -1523,8 +1523,11 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
drbd_al_begin_io(device, &req->i);
- drbd_req_make_private_bio(req, req->master_bio);
+ req->private_bio = bio_clone_fast(req->master_bio, GFP_NOIO,
+ &drbd_io_bio_set);
bio_set_dev(req->private_bio, device->ldev->backing_bdev);
+ req->private_bio->bi_private = req;
+ req->private_bio->bi_end_io = drbd_request_endio;
submit_bio_noacct(req->private_bio);
return 0;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 53ac59d19ae5..3fd99836bb1c 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -1015,7 +1015,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
rq->timeout = timeout;
/* insert request and run queue */
- blk_execute_rq(rq->q, NULL, rq, true);
+ blk_execute_rq(NULL, rq, true);
if (int_cmd->status) {
dev_err(&dd->pdev->dev, "Internal command [%02X] failed %d\n",
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 5357c3a4a36f..d6c821d48090 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1420,7 +1420,7 @@ static blk_qc_t null_submit_bio(struct bio *bio)
{
sector_t sector = bio->bi_iter.bi_sector;
sector_t nr_sectors = bio_sectors(bio);
- struct nullb *nullb = bio->bi_disk->private_data;
+ struct nullb *nullb = bio->bi_bdev->bd_disk->private_data;
struct nullb_queue *nq = nullb_to_queue(nullb);
struct nullb_cmd *cmd;
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index fce0a54df0e5..bfcab1c782b5 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -148,10 +148,6 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
sector += dev->zone_size_sects;
}
- q->limits.zoned = BLK_ZONED_HM;
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
- blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
-
return 0;
}
@@ -160,6 +156,10 @@ int null_register_zoned_dev(struct nullb *nullb)
struct nullb_device *dev = nullb->dev;
struct request_queue *q = nullb->q;
+ blk_queue_set_zoned(nullb->disk, BLK_ZONED_HM);
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
+ blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
+
if (queue_is_mq(q)) {
int ret = blk_revalidate_disk_zones(nullb->disk, NULL);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index a7af4f27b7c3..897acda20ac8 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -781,7 +781,7 @@ static int pd_special_command(struct pd_unit *disk,
req = blk_mq_rq_to_pdu(rq);
req->func = func;
- blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
+ blk_execute_rq(disk->gd, rq, 0);
blk_put_request(rq);
return 0;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b8bb8ec7538d..fc4b0f1aa86d 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -722,7 +722,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
if (cgc->quiet)
rq->rq_flags |= RQF_QUIET;
- blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
+ blk_execute_rq(pd->bdev->bd_disk, rq, 0);
if (scsi_req(rq)->result)
ret = -EIO;
out:
@@ -2374,7 +2374,7 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
blk_queue_split(&bio);
- pd = bio->bi_disk->queue->queuedata;
+ pd = bio->bi_bdev->bd_disk->queue->queuedata;
if (!pd) {
pr_err("%s incorrect request queue\n", bio_devname(bio, b));
goto end_io;
@@ -2418,7 +2418,7 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
split = bio;
}
- pkt_make_request_write(bio->bi_disk->queue, split);
+ pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
} while (split != bio);
return BLK_QC_T_NONE;
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index b71d28372ef3..1d738999fb69 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -581,7 +581,7 @@ out:
static blk_qc_t ps3vram_submit_bio(struct bio *bio)
{
- struct ps3_system_bus_device *dev = bio->bi_disk->private_data;
+ struct ps3_system_bus_device *dev = bio->bi_bdev->bd_disk->private_data;
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
int busy;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 59cfe71d0b3a..bbb88eb009e0 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -692,29 +692,10 @@ static void rbd_release(struct gendisk *disk, fmode_t mode)
put_device(&rbd_dev->dev);
}
-static int rbd_set_read_only(struct block_device *bdev, bool ro)
-{
- struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
-
- /*
- * Both images mapped read-only and snapshots can't be marked
- * read-write.
- */
- if (!ro) {
- if (rbd_is_ro(rbd_dev))
- return -EROFS;
-
- rbd_assert(!rbd_is_snap(rbd_dev));
- }
-
- return 0;
-}
-
static const struct block_device_operations rbd_bd_ops = {
.owner = THIS_MODULE,
.open = rbd_open,
.release = rbd_release,
- .set_read_only = rbd_set_read_only,
};
/*
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index edacefff6e35..9a28322a8cd8 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -122,7 +122,7 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
static blk_qc_t rsxx_submit_bio(struct bio *bio)
{
- struct rsxx_cardinfo *card = bio->bi_disk->private_data;
+ struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data;
struct rsxx_bio_meta *bio_meta;
blk_status_t st = BLK_STS_IOERR;
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 4478eb7efee0..2cdf2771f8e8 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -539,7 +539,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
spin_unlock_irq(&host->lock);
DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL);
+ blk_execute_rq_nowait(NULL, rq, true, NULL);
return 0;
@@ -578,7 +578,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
crq->msg_bucket = (u32) rc;
DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL);
+ blk_execute_rq_nowait(NULL, rq, true, NULL);
return 0;
}
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 2b95d7b33b91..982732dbe82e 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -521,7 +521,7 @@ static int mm_check_plugged(struct cardinfo *card)
static blk_qc_t mm_submit_bio(struct bio *bio)
{
- struct cardinfo *card = bio->bi_disk->private_data;
+ struct cardinfo *card = bio->bi_bdev->bd_disk->private_data;
pr_debug("mm_make_request %llu %u\n",
(unsigned long long)bio->bi_iter.bi_sector,
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 145606dc52db..b0285db7cf4f 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -320,7 +320,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
if (err)
goto out;
- blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+ blk_execute_rq(vblk->disk, req, false);
err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
blk_put_request(req);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e2933cb7a82a..d6243dbc53cc 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1596,7 +1596,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
*/
static blk_qc_t zram_submit_bio(struct bio *bio)
{
- struct zram *zram = bio->bi_disk->private_data;
+ struct zram *zram = bio->bi_bdev->bd_disk->private_data;
if (!valid_io_request(zram, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size)) {
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 8f0e52a71493..90ad34c6ef8e 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2214,7 +2214,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
rq->timeout = 60 * HZ;
bio = rq->bio;
- blk_execute_rq(q, cdi->disk, rq, 0);
+ blk_execute_rq(cdi->disk, rq, 0);
if (scsi_req(rq)->result) {
struct scsi_sense_hdr sshdr;
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 013ad33fbbc8..a1ce9f5ac3aa 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -107,7 +107,7 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
memcpy(scsi_req(rq)->cmd, pc->c, 12);
if (drive->media == ide_tape)
scsi_req(rq)->cmd[13] = REQ_IDETAPE_PC1;
- blk_execute_rq(drive->queue, disk, rq, 0);
+ blk_execute_rq(disk, rq, 0);
error = scsi_req(rq)->result ? -EIO : 0;
put_req:
blk_put_request(rq);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 25d2d88e82ad..cffbcc27a34c 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -467,7 +467,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
}
}
- blk_execute_rq(drive->queue, info->disk, rq, 0);
+ blk_execute_rq(info->disk, rq, 0);
error = scsi_req(rq)->result ? -EIO : 0;
if (buffer)
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 46f2df288c6a..011eab9c69b7 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -299,7 +299,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_MISC;
rq->rq_flags = RQF_QUIET;
- blk_execute_rq(drive->queue, cd->disk, rq, 0);
+ blk_execute_rq(cd->disk, rq, 0);
ret = scsi_req(rq)->result ? -EIO : 0;
blk_put_request(rq);
/*
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index f2f93ed40356..ca1d4b3d3878 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -173,7 +173,7 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
*(int *)&scsi_req(rq)->cmd[1] = arg;
ide_req(rq)->special = setting->set;
- blk_execute_rq(q, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
ret = scsi_req(rq)->result;
blk_put_request(rq);
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 34b9441084f8..8413731c6259 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -482,7 +482,7 @@ static int set_multcount(ide_drive_t *drive, int arg)
drive->mult_req = arg;
drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
- blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
blk_put_request(rq);
return (drive->mult_count == arg) ? 0 : -EIO;
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index 58994da10c06..43fbc37d85c3 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -137,7 +137,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, void __user *argp)
rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_TASKFILE;
- blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
err = scsi_req(rq)->result ? -EIO : 0;
blk_put_request(rq);
@@ -235,7 +235,7 @@ static int generic_drive_reset(ide_drive_t *drive)
ide_req(rq)->type = ATA_PRIV_MISC;
scsi_req(rq)->cmd_len = 1;
scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET;
- blk_execute_rq(drive->queue, NULL, rq, 1);
+ blk_execute_rq(NULL, rq, 1);
ret = scsi_req(rq)->result;
blk_put_request(rq);
return ret;
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 8af7af6001eb..a80a0f28f7b9 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -37,7 +37,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
scsi_req(rq)->cmd_len = 1;
ide_req(rq)->type = ATA_PRIV_MISC;
ide_req(rq)->special = &timeout;
- blk_execute_rq(q, NULL, rq, 1);
+ blk_execute_rq(NULL, rq, 1);
rc = scsi_req(rq)->result ? -EIO : 0;
blk_put_request(rq);
if (rc)
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 82ab308f1aaf..d680b3e3295f 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -27,7 +27,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
mesg.event = PM_EVENT_FREEZE;
rqpm.pm_state = mesg.event;
- blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
ret = scsi_req(rq)->result ? -EIO : 0;
blk_put_request(rq);
@@ -50,7 +50,7 @@ static int ide_pm_execute_rq(struct request *rq)
blk_mq_end_request(rq, BLK_STS_OK);
return -ENXIO;
}
- blk_execute_rq(q, NULL, rq, true);
+ blk_execute_rq(NULL, rq, true);
return scsi_req(rq)->result ? -EIO : 0;
}
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 88b96437b22e..fa05e7e7d609 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -868,7 +868,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
goto out_put;
}
- blk_execute_rq(drive->queue, tape->disk, rq, 0);
+ blk_execute_rq(tape->disk, rq, 0);
/* calculate the number of transferred bytes and update buffer state */
size -= scsi_req(rq)->resid_len;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index d016cbe68cba..6665fc4724b9 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -443,7 +443,7 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
ide_req(rq)->special = cmd;
cmd->rq = rq;
- blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
error = scsi_req(rq)->result ? -EIO : 0;
put_req:
blk_put_request(rq);
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index b6246f73895c..5924f09c217b 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -49,7 +49,7 @@ struct bio_set pblk_bio_set;
static blk_qc_t pblk_submit_bio(struct bio *bio)
{
- struct pblk *pblk = bio->bi_disk->queue->queuedata;
+ struct pblk *pblk = bio->bi_bdev->bd_disk->queue->queuedata;
if (bio_op(bio) == REQ_OP_DISCARD) {
pblk_discard(pblk, bio);
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index b00fd08d696b..63e809f38e3f 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -114,7 +114,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
check = bio_kmalloc(GFP_NOIO, bio_segments(bio));
if (!check)
return;
- check->bi_disk = bio->bi_disk;
+ bio_set_dev(check, bio->bi_bdev);
check->bi_opf = REQ_OP_READ;
check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
check->bi_iter.bi_size = bio->bi_iter.bi_size;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 85b1f2a9b72d..29c231758293 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -475,7 +475,7 @@ struct search {
unsigned int read_dirty_data:1;
unsigned int cache_missed:1;
- struct block_device *part;
+ struct block_device *orig_bdev;
unsigned long start_time;
struct btree_op op;
@@ -670,8 +670,8 @@ static void bio_complete(struct search *s)
{
if (s->orig_bio) {
/* Count on bcache device */
- part_end_io_acct(s->part, s->orig_bio, s->start_time);
-
+ bio_end_io_acct_remapped(s->orig_bio, s->start_time,
+ s->orig_bdev);
trace_bcache_request_end(s->d, s->orig_bio);
s->orig_bio->bi_status = s->iop.status;
bio_endio(s->orig_bio);
@@ -714,7 +714,8 @@ static void search_free(struct closure *cl)
}
static inline struct search *search_alloc(struct bio *bio,
- struct bcache_device *d)
+ struct bcache_device *d, struct block_device *orig_bdev,
+ unsigned long start_time)
{
struct search *s;
@@ -732,7 +733,8 @@ static inline struct search *search_alloc(struct bio *bio,
s->write = op_is_write(bio_op(bio));
s->read_dirty_data = 0;
/* Count on the bcache device */
- s->start_time = part_start_io_acct(d->disk, &s->part, bio);
+ s->orig_bdev = orig_bdev;
+ s->start_time = start_time;
s->iop.c = d->c;
s->iop.bio = NULL;
s->iop.inode = d->id;
@@ -894,7 +896,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
reada = min_t(sector_t, dc->readahead >> 9,
- get_capacity(bio->bi_disk) - bio_end_sector(bio));
+ get_capacity(bio->bi_bdev->bd_disk) -
+ bio_end_sector(bio));
s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
@@ -1073,7 +1076,7 @@ struct detached_dev_io_private {
unsigned long start_time;
bio_end_io_t *bi_end_io;
void *bi_private;
- struct block_device *part;
+ struct block_device *orig_bdev;
};
static void detached_dev_end_io(struct bio *bio)
@@ -1085,7 +1088,7 @@ static void detached_dev_end_io(struct bio *bio)
bio->bi_private = ddip->bi_private;
/* Count on the bcache device */
- part_end_io_acct(ddip->part, bio, ddip->start_time);
+ bio_end_io_acct_remapped(bio, ddip->start_time, ddip->orig_bdev);
if (bio->bi_status) {
struct cached_dev *dc = container_of(ddip->d,
@@ -1098,7 +1101,8 @@ static void detached_dev_end_io(struct bio *bio)
bio->bi_end_io(bio);
}
-static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
+static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
+ struct block_device *orig_bdev, unsigned long start_time)
{
struct detached_dev_io_private *ddip;
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
@@ -1111,7 +1115,8 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
ddip->d = d;
/* Count on the bcache device */
- ddip->start_time = part_start_io_acct(d->disk, &ddip->part, bio);
+ ddip->orig_bdev = orig_bdev;
+ ddip->start_time = start_time;
ddip->bi_end_io = bio->bi_end_io;
ddip->bi_private = bio->bi_private;
bio->bi_end_io = detached_dev_end_io;
@@ -1167,8 +1172,10 @@ static void quit_max_writeback_rate(struct cache_set *c,
blk_qc_t cached_dev_submit_bio(struct bio *bio)
{
struct search *s;
- struct bcache_device *d = bio->bi_disk->private_data;
+ struct block_device *orig_bdev = bio->bi_bdev;
+ struct bcache_device *d = orig_bdev->bd_disk->private_data;
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+ unsigned long start_time;
int rw = bio_data_dir(bio);
if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
@@ -1193,11 +1200,13 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
}
}
+ start_time = bio_start_io_acct(bio);
+
bio_set_dev(bio, dc->bdev);
bio->bi_iter.bi_sector += dc->sb.data_offset;
if (cached_dev_get(dc)) {
- s = search_alloc(bio, d);
+ s = search_alloc(bio, d, orig_bdev, start_time);
trace_bcache_request_start(s->d, bio);
if (!bio->bi_iter.bi_size) {
@@ -1218,7 +1227,7 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
}
} else
/* I/O request sent to backing device */
- detached_dev_do_request(d, bio);
+ detached_dev_do_request(d, bio, orig_bdev, start_time);
return BLK_QC_T_NONE;
}
@@ -1274,7 +1283,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct closure *cl;
- struct bcache_device *d = bio->bi_disk->private_data;
+ struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
bio->bi_status = BLK_STS_IOERR;
@@ -1282,7 +1291,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
return BLK_QC_T_NONE;
}
- s = search_alloc(bio, d);
+ s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio));
cl = &s->cl;
bio = &s->bio.bio;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 2047a9cccdb5..193fe7652329 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1939,7 +1939,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
goto err;
if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
- BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
+ BIOSET_NEED_RESCUER))
goto err;
c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index 2ea0360108e1..a3b71350eec8 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -18,8 +18,7 @@
*/
struct dm_bio_details {
- struct gendisk *bi_disk;
- u8 bi_partno;
+ struct block_device *bi_bdev;
int __bi_remaining;
unsigned long bi_flags;
struct bvec_iter bi_iter;
@@ -31,8 +30,7 @@ struct dm_bio_details {
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
{
- bd->bi_disk = bio->bi_disk;
- bd->bi_partno = bio->bi_partno;
+ bd->bi_bdev = bio->bi_bdev;
bd->bi_flags = bio->bi_flags;
bd->bi_iter = bio->bi_iter;
bd->__bi_remaining = atomic_read(&bio->__bi_remaining);
@@ -44,8 +42,7 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
{
- bio->bi_disk = bd->bi_disk;
- bio->bi_partno = bd->bi_partno;
+ bio->bi_bdev = bd->bi_bdev;
bio->bi_flags = bd->bi_flags;
bio->bi_iter = bd->bi_iter;
atomic_set(&bio->__bi_remaining, bd->__bi_remaining);
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index af6d4f898e4c..89a73204dbf4 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -449,7 +449,7 @@ static int __check_incompat_features(struct cache_disk_superblock *disk_super,
/*
* Check for read-only metadata to skip the following RDWR checks.
*/
- if (get_disk_ro(cmd->bdev->bd_disk))
+ if (bdev_read_only(cmd->bdev))
return 0;
features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP;
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index bdb255edc200..a90bdf9b2ca6 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -85,12 +85,6 @@ struct clone {
struct dm_clone_metadata *cmd;
- /*
- * bio used to flush the destination device, before committing the
- * metadata.
- */
- struct bio flush_bio;
-
/* Region hydration hash table */
struct hash_table_bucket *ht;
@@ -1155,11 +1149,7 @@ static int commit_metadata(struct clone *clone, bool *dest_dev_flushed)
goto out;
}
- bio_reset(&clone->flush_bio);
- bio_set_dev(&clone->flush_bio, clone->dest_dev->bdev);
- clone->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
-
- r = submit_bio_wait(&clone->flush_bio);
+ r = blkdev_issue_flush(clone->dest_dev->bdev);
if (unlikely(r)) {
__metadata_operation_failed(clone, "flush destination device", r);
goto out;
@@ -1886,7 +1876,6 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bio_list_init(&clone->deferred_flush_completions);
clone->hydration_offset = 0;
atomic_set(&clone->hydrations_in_flight, 0);
- bio_init(&clone->flush_bio, NULL, 0);
clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
if (!clone->wq) {
@@ -1958,7 +1947,6 @@ static void clone_dtr(struct dm_target *ti)
struct clone *clone = ti->private;
mutex_destroy(&clone->commit_lock);
- bio_uninit(&clone->flush_bio);
for (i = 0; i < clone->nr_ctr_args; i++)
kfree(clone->ctr_args[i]);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index fa09bc4e4c54..b0a82f29a2e4 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -145,7 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
struct dm_raid1_bio_record {
struct mirror *m;
- /* if details->bi_disk == NULL, details were not saved */
+ /* if details->bi_bdev == NULL, details were not saved */
struct dm_bio_details details;
region_t write_region;
};
@@ -1190,7 +1190,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
struct dm_raid1_bio_record *bio_record =
dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
- bio_record->details.bi_disk = NULL;
+ bio_record->details.bi_bdev = NULL;
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
@@ -1257,7 +1257,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
goto out;
if (unlikely(*error)) {
- if (!bio_record->details.bi_disk) {
+ if (!bio_record->details.bi_bdev) {
/*
* There wasn't enough memory to record necessary
* information for a retry or there was no other
@@ -1282,7 +1282,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
bd = &bio_record->details;
dm_bio_restore(bd, bio);
- bio_record->details.bi_disk = NULL;
+ bio_record->details.bi_bdev = NULL;
bio->bi_status = 0;
queue_bio(ms, bio, rw);
@@ -1292,7 +1292,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
}
out:
- bio_record->details.bi_disk = NULL;
+ bio_record->details.bi_bdev = NULL;
return DM_ENDIO_DONE;
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 6ebb2127f3e2..e75b20480e46 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -636,7 +636,7 @@ static int __check_incompat_features(struct thin_disk_superblock *disk_super,
/*
* Check for read-only metadata to skip the following RDWR checks.
*/
- if (get_disk_ro(pmd->bdev->bd_disk))
+ if (bdev_read_only(pmd->bdev))
return 0;
features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index b298fefb022e..039d17b28938 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -819,7 +819,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block,
mblk->page);
if (ret == 0)
- ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
+ ret = blkdev_issue_flush(dev->bdev);
return ret;
}
@@ -862,7 +862,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
/* Flush drive cache (this will also sync data) */
if (ret == 0)
- ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
+ ret = blkdev_issue_flush(dev->bdev);
return ret;
}
@@ -933,7 +933,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
/* If there are no dirty metadata blocks, just flush the device cache */
if (list_empty(&write_list)) {
- ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
+ ret = blkdev_issue_flush(dev->bdev);
goto err;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 7bac564f3faa..479ec5bea09e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -977,16 +977,17 @@ static void clone_endio(struct bio *bio)
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
struct bio *orig_bio = io->orig_bio;
+ struct request_queue *q = bio->bi_bdev->bd_disk->queue;
if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_DISCARD &&
- !bio->bi_disk->queue->limits.max_discard_sectors)
+ !q->limits.max_discard_sectors)
disable_discard(md);
else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
- !bio->bi_disk->queue->limits.max_write_same_sectors)
+ !q->limits.max_write_same_sectors)
disable_write_same(md);
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
- !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+ !q->limits.max_write_zeroes_sectors)
disable_write_zeroes(md);
}
@@ -996,7 +997,7 @@ static void clone_endio(struct bio *bio)
*/
if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
sector_t written_sector = bio->bi_iter.bi_sector;
- struct request_queue *q = orig_bio->bi_disk->queue;
+ struct request_queue *q = orig_bio->bi_bdev->bd_disk->queue;
u64 mask = (u64)blk_queue_zone_sectors(q) - 1;
orig_bio->bi_iter.bi_sector += written_sector & mask;
@@ -1422,8 +1423,7 @@ static int __send_empty_flush(struct clone_info *ci)
*/
bio_init(&flush_bio, NULL, 0);
flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
- flush_bio.bi_disk = ci->io->md->disk;
- bio_associate_blkg(&flush_bio);
+ bio_set_dev(&flush_bio, ci->io->md->disk->part0);
ci->bio = &flush_bio;
ci->sector_count = 0;
@@ -1626,7 +1626,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
static blk_qc_t dm_submit_bio(struct bio *bio)
{
- struct mapped_device *md = bio->bi_disk->private_data;
+ struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
struct dm_table *map;
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 68cac7d19278..63ed8329a98d 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -252,7 +252,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
start_sector + data_offset;
if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_disk->queue))) {
+ !blk_queue_discard(bio->bi_bdev->bd_disk->queue))) {
/* Just ignore it */
bio_endio(bio);
} else {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 04384452a7ab..21da0c48f6c2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -340,24 +340,6 @@ static int start_readonly;
*/
static bool create_on_open = true;
-struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
- struct mddev *mddev)
-{
- if (!mddev || !bioset_initialized(&mddev->bio_set))
- return bio_alloc(gfp_mask, nr_iovecs);
-
- return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
-}
-EXPORT_SYMBOL_GPL(bio_alloc_mddev);
-
-static struct bio *md_bio_alloc_sync(struct mddev *mddev)
-{
- if (!mddev || !bioset_initialized(&mddev->sync_set))
- return bio_alloc(GFP_NOIO, 1);
-
- return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
-}
-
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
@@ -463,8 +445,8 @@ struct md_io {
struct mddev *mddev;
bio_end_io_t *orig_bi_end_io;
void *orig_bi_private;
+ struct block_device *orig_bi_bdev;
unsigned long start_time;
- struct block_device *part;
};
static void md_end_io(struct bio *bio)
@@ -472,7 +454,7 @@ static void md_end_io(struct bio *bio)
struct md_io *md_io = bio->bi_private;
struct mddev *mddev = md_io->mddev;
- part_end_io_acct(md_io->part, bio, md_io->start_time);
+ bio_end_io_acct_remapped(bio, md_io->start_time, md_io->orig_bi_bdev);
bio->bi_end_io = md_io->orig_bi_end_io;
bio->bi_private = md_io->orig_bi_private;
@@ -486,7 +468,7 @@ static void md_end_io(struct bio *bio)
static blk_qc_t md_submit_bio(struct bio *bio)
{
const int rw = bio_data_dir(bio);
- struct mddev *mddev = bio->bi_disk->private_data;
+ struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
if (mddev == NULL || mddev->pers == NULL) {
bio_io_error(bio);
@@ -514,12 +496,12 @@ static blk_qc_t md_submit_bio(struct bio *bio)
md_io->mddev = mddev;
md_io->orig_bi_end_io = bio->bi_end_io;
md_io->orig_bi_private = bio->bi_private;
+ md_io->orig_bi_bdev = bio->bi_bdev;
bio->bi_end_io = md_end_io;
bio->bi_private = md_io;
- md_io->start_time = part_start_io_acct(mddev->gendisk,
- &md_io->part, bio);
+ md_io->start_time = bio_start_io_acct(bio);
}
/* bio could be mergeable after passing to underlayer */
@@ -613,7 +595,7 @@ static void submit_flushes(struct work_struct *ws)
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
+ bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bio_set_dev(bi, rdev->bdev);
@@ -999,7 +981,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
if (test_bit(Faulty, &rdev->flags))
return;
- bio = md_bio_alloc_sync(mddev);
+ bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
atomic_inc(&rdev->nr_pending);
@@ -1031,29 +1013,29 @@ int md_super_wait(struct mddev *mddev)
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int op, int op_flags, bool metadata_op)
{
- struct bio *bio = md_bio_alloc_sync(rdev->mddev);
- int ret;
+ struct bio bio;
+ struct bio_vec bvec;
+
+ bio_init(&bio, &bvec, 1);
if (metadata_op && rdev->meta_bdev)
- bio_set_dev(bio, rdev->meta_bdev);
+ bio_set_dev(&bio, rdev->meta_bdev);
else
- bio_set_dev(bio, rdev->bdev);
- bio_set_op_attrs(bio, op, op_flags);
+ bio_set_dev(&bio, rdev->bdev);
+ bio.bi_opf = op | op_flags;
if (metadata_op)
- bio->bi_iter.bi_sector = sector + rdev->sb_start;
+ bio.bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
(rdev->mddev->reshape_backwards ==
(sector >= rdev->mddev->reshape_position)))
- bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
+ bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
else
- bio->bi_iter.bi_sector = sector + rdev->data_offset;
- bio_add_page(bio, page, size, 0);
+ bio.bi_iter.bi_sector = sector + rdev->data_offset;
+ bio_add_page(&bio, page, size, 0);
- submit_bio_wait(bio);
+ submit_bio_wait(&bio);
- ret = !bio->bi_status;
- bio_put(bio);
- return ret;
+ return !bio.bi_status;
}
EXPORT_SYMBOL_GPL(sync_page_io);
@@ -2417,6 +2399,12 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
}
EXPORT_SYMBOL(md_integrity_add_rdev);
+static bool rdev_read_only(struct md_rdev *rdev)
+{
+ return bdev_read_only(rdev->bdev) ||
+ (rdev->meta_bdev && bdev_read_only(rdev->meta_bdev));
+}
+
static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
{
char b[BDEVNAME_SIZE];
@@ -2426,8 +2414,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
if (find_rdev(mddev, rdev->bdev->bd_dev))
return -EEXIST;
- if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) &&
- mddev->pers)
+ if (rdev_read_only(rdev) && mddev->pers)
return -EROFS;
/* make sure rdev->sectors exceeds mddev->dev_sectors */
@@ -5861,9 +5848,7 @@ int md_run(struct mddev *mddev)
continue;
sync_blockdev(rdev->bdev);
invalidate_bdev(rdev->bdev);
- if (mddev->ro != 1 &&
- (bdev_read_only(rdev->bdev) ||
- bdev_read_only(rdev->meta_bdev))) {
+ if (mddev->ro != 1 && rdev_read_only(rdev)) {
mddev->ro = 1;
if (mddev->gendisk)
set_disk_ro(mddev->gendisk, 1);
@@ -6158,7 +6143,7 @@ static int restart_array(struct mddev *mddev)
if (test_bit(Journal, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags))
has_journal = true;
- if (bdev_read_only(rdev->bdev))
+ if (rdev_read_only(rdev))
has_readonly = true;
}
rcu_read_unlock();
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 34070ab30a8a..bcbba1b5ec4a 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -556,7 +556,7 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect
static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
{
- atomic_add(nr_sectors, &bio->bi_disk->sync_io);
+ md_sync_acct(bio->bi_bdev, nr_sectors);
}
struct md_personality
@@ -742,8 +742,6 @@ extern void md_rdev_clear(struct md_rdev *rdev);
extern void md_handle_request(struct mddev *mddev, struct bio *bio);
extern void mddev_suspend(struct mddev *mddev);
extern void mddev_resume(struct mddev *mddev);
-extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
- struct mddev *mddev);
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
@@ -793,14 +791,14 @@ static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
{
if (bio_op(bio) == REQ_OP_WRITE_SAME &&
- !bio->bi_disk->queue->limits.max_write_same_sectors)
+ !bio->bi_bdev->bd_disk->queue->limits.max_write_same_sectors)
mddev->queue->limits.max_write_same_sectors = 0;
}
static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
- !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+ !bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors)
mddev->queue->limits.max_write_zeroes_sectors = 0;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index c0347997f6ff..d2378765dc15 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -794,13 +794,13 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void *)bio->bi_disk;
+ struct md_rdev *rdev = (void *)bio->bi_bdev;
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_disk->queue)))
+ !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1104,7 +1104,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
int i = 0;
struct bio *behind_bio = NULL;
- behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
+ behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set);
if (!behind_bio)
return;
@@ -1520,7 +1520,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
r1_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
- mbio->bi_disk = (void *)conf->mirrors[i].rdev;
+ mbio->bi_bdev = (void *)conf->mirrors[i].rdev;
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
if (cb)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c5d88ef6a45c..a9ae7d113492 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -882,13 +882,13 @@ static void flush_pending_writes(struct r10conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void*)bio->bi_disk;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_disk->queue)))
+ !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1075,13 +1075,13 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void*)bio->bi_disk;
+ struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bio->bi_disk->queue)))
+ !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1253,7 +1253,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk),
r10_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
- mbio->bi_disk = (void *)rdev;
+ mbio->bi_bdev = (void *)rdev;
atomic_inc(&r10_bio->remaining);
@@ -3003,7 +3003,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
/* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that
- * have bi_end_io, bi_sector, bi_disk set,
+ * have bi_end_io, bi_sector, bi_bdev set,
* and bi_private set to the r10bio.
* For recovery, we may actually create several r10bios
* with 2 bios in each, that correspond to the bios in the main one.
@@ -4531,7 +4531,7 @@ read_more:
return sectors_done;
}
- read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
+ read_bio = bio_alloc_bioset(GFP_KERNEL, RESYNC_PAGES, &mddev->bio_set);
bio_set_dev(read_bio, rdev->bdev);
read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
@@ -4539,10 +4539,6 @@ read_more:
read_bio->bi_private = r10_bio;
read_bio->bi_end_io = end_reshape_read;
bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
- read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
- read_bio->bi_status = 0;
- read_bio->bi_vcnt = 0;
- read_bio->bi_iter.bi_size = 0;
r10_bio->master_bio = read_bio;
r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index d0f540296fe9..e8c118e05dfd 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1037,7 +1037,7 @@ static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
}
/* flush the disk cache after recovery if necessary */
- ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL);
+ ret = blkdev_issue_flush(rdev->bdev);
out:
__free_page(page);
return ret;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3a90cc0e43ca..a348b2adf2a9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5310,7 +5310,7 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
unsigned int chunk_sectors;
unsigned int bio_sectors = bio_sectors(bio);
- WARN_ON_ONCE(bio->bi_partno);
+ WARN_ON_ONCE(bio->bi_bdev->bd_partno);
chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
return chunk_sectors >=
@@ -5393,90 +5393,72 @@ static void raid5_align_endio(struct bio *bi)
static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
{
struct r5conf *conf = mddev->private;
- int dd_idx;
- struct bio* align_bi;
+ struct bio *align_bio;
struct md_rdev *rdev;
- sector_t end_sector;
+ sector_t sector, end_sector, first_bad;
+ int bad_sectors, dd_idx;
if (!in_chunk_boundary(mddev, raid_bio)) {
pr_debug("%s: non aligned\n", __func__);
return 0;
}
- /*
- * use bio_clone_fast to make a copy of the bio
- */
- align_bi = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
- if (!align_bi)
- return 0;
- /*
- * set bi_end_io to a new function, and set bi_private to the
- * original bio.
- */
- align_bi->bi_end_io = raid5_align_endio;
- align_bi->bi_private = raid_bio;
- /*
- * compute position
- */
- align_bi->bi_iter.bi_sector =
- raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
- 0, &dd_idx, NULL);
- end_sector = bio_end_sector(align_bi);
+ sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0,
+ &dd_idx, NULL);
+ end_sector = bio_end_sector(raid_bio);
+
rcu_read_lock();
+ if (r5c_big_stripe_cached(conf, sector))
+ goto out_rcu_unlock;
+
rdev = rcu_dereference(conf->disks[dd_idx].replacement);
if (!rdev || test_bit(Faulty, &rdev->flags) ||
rdev->recovery_offset < end_sector) {
rdev = rcu_dereference(conf->disks[dd_idx].rdev);
- if (rdev &&
- (test_bit(Faulty, &rdev->flags) ||
+ if (!rdev)
+ goto out_rcu_unlock;
+ if (test_bit(Faulty, &rdev->flags) ||
!(test_bit(In_sync, &rdev->flags) ||
- rdev->recovery_offset >= end_sector)))
- rdev = NULL;
+ rdev->recovery_offset >= end_sector))
+ goto out_rcu_unlock;
}
- if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) {
- rcu_read_unlock();
- bio_put(align_bi);
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+
+ align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
+ bio_set_dev(align_bio, rdev->bdev);
+ align_bio->bi_end_io = raid5_align_endio;
+ align_bio->bi_private = raid_bio;
+ align_bio->bi_iter.bi_sector = sector;
+
+ raid_bio->bi_next = (void *)rdev;
+
+ if (is_badblock(rdev, sector, bio_sectors(align_bio), &first_bad,
+ &bad_sectors)) {
+ bio_put(align_bio);
+ rdev_dec_pending(rdev, mddev);
return 0;
}
- if (rdev) {
- sector_t first_bad;
- int bad_sectors;
-
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- raid_bio->bi_next = (void*)rdev;
- bio_set_dev(align_bi, rdev->bdev);
-
- if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
- bio_sectors(align_bi),
- &first_bad, &bad_sectors)) {
- bio_put(align_bi);
- rdev_dec_pending(rdev, mddev);
- return 0;
- }
+ /* No reshape active, so we can trust rdev->data_offset */
+ align_bio->bi_iter.bi_sector += rdev->data_offset;
- /* No reshape active, so we can trust rdev->data_offset */
- align_bi->bi_iter.bi_sector += rdev->data_offset;
+ spin_lock_irq(&conf->device_lock);
+ wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0,
+ conf->device_lock);
+ atomic_inc(&conf->active_aligned_reads);
+ spin_unlock_irq(&conf->device_lock);
- spin_lock_irq(&conf->device_lock);
- wait_event_lock_irq(conf->wait_for_quiescent,
- conf->quiesce == 0,
- conf->device_lock);
- atomic_inc(&conf->active_aligned_reads);
- spin_unlock_irq(&conf->device_lock);
+ if (mddev->gendisk)
+ trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk),
+ raid_bio->bi_iter.bi_sector);
+ submit_bio_noacct(align_bio);
+ return 1;
- if (mddev->gendisk)
- trace_block_bio_remap(align_bi, disk_devt(mddev->gendisk),
- raid_bio->bi_iter.bi_sector);
- submit_bio_noacct(align_bi);
- return 1;
- } else {
- rcu_read_unlock();
- bio_put(align_bi);
- return 0;
- }
+out_rcu_unlock:
+ rcu_read_unlock();
+ return 0;
}
static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 42e27a298218..a1d6b68320ae 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -253,7 +253,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
goto out_put;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
- blk_execute_rq(mq->queue, NULL, req, 0);
+ blk_execute_rq(NULL, req, 0);
ret = req_to_mmc_queue_req(req)->drv_op_result;
blk_put_request(req);
@@ -629,7 +629,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idatas;
req_to_mmc_queue_req(req)->ioc_count = 1;
- blk_execute_rq(mq->queue, NULL, req, 0);
+ blk_execute_rq(NULL, req, 0);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
blk_put_request(req);
@@ -698,7 +698,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idata;
req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
- blk_execute_rq(mq->queue, NULL, req, 0);
+ blk_execute_rq(NULL, req, 0);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
/* copy to user if data and response */
@@ -2722,7 +2722,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
if (IS_ERR(req))
return PTR_ERR(req);
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
- blk_execute_rq(mq->queue, NULL, req, 0);
+ blk_execute_rq(NULL, req, 0);
ret = req_to_mmc_queue_req(req)->drv_op_result;
if (ret >= 0) {
*val = ret;
@@ -2761,7 +2761,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
- blk_execute_rq(mq->queue, NULL, req, 0);
+ blk_execute_rq(NULL, req, 0);
err = req_to_mmc_queue_req(req)->drv_op_result;
blk_put_request(req);
if (err) {
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 22e5617b2cea..e03a1f38d750 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -165,7 +165,7 @@ static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
static blk_qc_t nd_blk_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip;
- struct nd_namespace_blk *nsblk = bio->bi_disk->private_data;
+ struct nd_namespace_blk *nsblk = bio->bi_bdev->bd_disk->private_data;
struct bvec_iter iter;
unsigned long start;
struct bio_vec bvec;
@@ -177,7 +177,7 @@ static blk_qc_t nd_blk_submit_bio(struct bio *bio)
bip = bio_integrity(bio);
rw = bio_data_dir(bio);
- do_acct = blk_queue_io_stat(bio->bi_disk->queue);
+ do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
if (do_acct)
start = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {
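Editor's note, not part of the patch: a sketch of the new lookup path in a bio-based ->submit_bio handler. With bio->bi_disk gone, the owning gendisk is reached through bio->bi_bdev->bd_disk, as in the nvdimm conversions here. struct example_dev and example_submit_bio are hypothetical placeholders.

#include <linux/bio.h>
#include <linux/blkdev.h>

struct example_dev {
        int placeholder;
};

static blk_qc_t example_submit_bio(struct bio *bio)
{
        /* Driver state and queue both hang off the bdev's gendisk now. */
        struct example_dev *dev = bio->bi_bdev->bd_disk->private_data;
        bool do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
        unsigned long start = 0;

        if (do_acct)
                start = bio_start_io_acct(bio);
        /* ... perform the transfer against dev here ... */
        (void)dev;
        if (do_acct)
                bio_end_io_acct(bio, start);
        bio_endio(bio);
        return BLK_QC_T_NONE;
}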
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 12ff6f8784ac..41aa1f01fc07 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1442,7 +1442,7 @@ static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
static blk_qc_t btt_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
- struct btt *btt = bio->bi_disk->private_data;
+ struct btt *btt = bio->bi_bdev->bd_disk->private_data;
struct bvec_iter iter;
unsigned long start;
struct bio_vec bvec;
@@ -1452,7 +1452,7 @@ static blk_qc_t btt_submit_bio(struct bio *bio)
if (!bio_integrity_prep(bio))
return BLK_QC_T_NONE;
- do_acct = blk_queue_io_stat(bio->bi_disk->queue);
+ do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
if (do_acct)
start = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index f33bdae626ba..281fedb4dc4d 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -196,13 +196,13 @@ static blk_qc_t pmem_submit_bio(struct bio *bio)
unsigned long start;
struct bio_vec bvec;
struct bvec_iter iter;
- struct pmem_device *pmem = bio->bi_disk->private_data;
+ struct pmem_device *pmem = bio->bi_bdev->bd_disk->private_data;
struct nd_region *nd_region = to_region(pmem);
if (bio->bi_opf & REQ_PREFLUSH)
ret = nvdimm_flush(nd_region, bio);
- do_acct = blk_queue_io_stat(bio->bi_disk->queue);
+ do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
if (do_acct)
start = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f13eb4ded95f..89cacff897a8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -925,7 +925,7 @@ static void nvme_execute_rq_polled(struct request_queue *q,
rq->cmd_flags |= REQ_HIPRI;
rq->end_io_data = &wait;
- blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);
+ blk_execute_rq_nowait(bd_disk, rq, at_head, nvme_end_sync_rq);
while (!completion_done(&wait)) {
blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
@@ -964,7 +964,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
if (poll)
nvme_execute_rq_polled(req->q, NULL, req, at_head);
else
- blk_execute_rq(req->q, NULL, req, at_head);
+ blk_execute_rq(NULL, req, at_head);
if (result)
*result = nvme_req(req)->result;
if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
@@ -1101,7 +1101,7 @@ void nvme_execute_passthru_rq(struct request *rq)
u32 effects;
effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
- blk_execute_rq(rq->q, disk, rq, 0);
+ blk_execute_rq(disk, rq, 0);
nvme_passthru_end(ctrl, effects);
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
@@ -1113,7 +1113,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
{
bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata;
- struct gendisk *disk = ns ? ns->disk : NULL;
+ struct block_device *bdev = ns ? ns->disk->part0 : NULL;
struct request *req;
struct bio *bio = NULL;
void *meta = NULL;
@@ -1133,8 +1133,9 @@ static int nvme_submit_user_cmd(struct request_queue *q,
if (ret)
goto out;
bio = req->bio;
- bio->bi_disk = disk;
- if (disk && meta_buffer && meta_len) {
+ if (bdev)
+ bio_set_dev(bio, bdev);
+ if (bdev && meta_buffer && meta_len) {
meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
meta_seed, write);
if (IS_ERR(meta)) {
@@ -1202,7 +1203,7 @@ static int nvme_keep_alive(struct nvme_ctrl *ctrl)
rq->timeout = ctrl->kato * HZ;
rq->end_io_data = ctrl;
- blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
+ blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io);
return 0;
}
@@ -2125,9 +2126,8 @@ static void nvme_update_disk_info(struct gendisk *disk,
nvme_config_discard(disk, ns);
nvme_config_write_zeroes(disk, ns);
- if ((id->nsattr & NVME_NS_ATTR_RO) ||
- test_bit(NVME_NS_FORCE_RO, &ns->flags))
- set_disk_ro(disk, true);
+ set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
+ test_bit(NVME_NS_FORCE_RO, &ns->flags));
}
static inline bool nvme_first_scan(struct gendisk *disk)
@@ -2176,17 +2176,18 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
ns->lba_shift = id->lbaf[lbaf].ds;
nvme_set_queue_limits(ns->ctrl, ns->queue);
+ ret = nvme_configure_metadata(ns, id);
+ if (ret)
+ goto out_unfreeze;
+ nvme_set_chunk_sectors(ns, id);
+ nvme_update_disk_info(ns->disk, ns, id);
+
if (ns->head->ids.csi == NVME_CSI_ZNS) {
ret = nvme_update_zone_info(ns, lbaf);
if (ret)
goto out_unfreeze;
}
- ret = nvme_configure_metadata(ns, id);
- if (ret)
- goto out_unfreeze;
- nvme_set_chunk_sectors(ns, id);
- nvme_update_disk_info(ns->disk, ns, id);
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 470cef3abec3..b705988629f2 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -695,7 +695,7 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd,
rq->end_io_data = rqd;
- blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
+ blk_execute_rq_nowait(NULL, rq, 0, nvme_nvm_end_io);
return 0;
@@ -757,7 +757,6 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
{
bool write = nvme_is_write((struct nvme_command *)vcmd);
struct nvm_dev *dev = ns->ndev;
- struct gendisk *disk = ns->disk;
struct request *rq;
struct bio *bio = NULL;
__le64 *ppa_list = NULL;
@@ -817,10 +816,10 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
}
- bio->bi_disk = disk;
+ bio_set_dev(bio, ns->disk->part0);
}
- blk_execute_rq(q, NULL, rq, 0);
+ blk_execute_rq(NULL, rq, 0);
if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
ret = -EINTR;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 282b7a4ea9a9..1427c9555cef 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -296,7 +296,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
{
- struct nvme_ns_head *head = bio->bi_disk->private_data;
+ struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
struct device *dev = disk_to_dev(head->disk);
struct nvme_ns *ns;
blk_qc_t ret = BLK_QC_T_NONE;
@@ -312,7 +312,7 @@ blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
srcu_idx = srcu_read_lock(&head->srcu);
ns = nvme_find_path(head);
if (likely(ns)) {
- bio->bi_disk = ns->disk;
+ bio_set_dev(bio, ns->disk->part0);
bio->bi_opf |= REQ_NVME_MPATH;
trace_block_bio_remap(bio, disk_devt(ns->head->disk),
bio->bi_iter.bi_sector);
@@ -352,7 +352,7 @@ static void nvme_requeue_work(struct work_struct *work)
* Reset disk to the mpath node and resubmit to select a new
* path.
*/
- bio->bi_disk = head->disk;
+ bio_set_dev(bio, head->disk->part0);
submit_bio_noacct(bio);
}
}
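Editor's note, not part of the patch: a brief sketch of the retargeting idiom the nvme changes above rely on. Instead of assigning bio->bi_disk, the bio is pointed at the whole-disk block_device via bio_set_dev() and the disk's part0. example_retarget_bio is hypothetical.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical helper: redirect a bio to another gendisk and resubmit it. */
static void example_retarget_bio(struct bio *bio, struct gendisk *disk)
{
        bio_set_dev(bio, disk->part0);  /* part0 is the whole-disk bdev */
        submit_bio_noacct(bio);
}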
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6bad4d4dcdf0..808ab5186928 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1357,7 +1357,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
}
abort_req->end_io_data = NULL;
- blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
+ blk_execute_rq_nowait(NULL, abort_req, 0, abort_endio);
/*
* The aborted req will be completed on receiving the abort req.
@@ -2281,7 +2281,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
req->end_io_data = nvmeq;
init_completion(&nvmeq->delete_done);
- blk_execute_rq_nowait(q, NULL, req, false,
+ blk_execute_rq_nowait(NULL, req, false,
opcode == nvme_admin_delete_cq ?
nvme_del_cq_end : nvme_del_queue_end);
return 0;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index b7ce4f221d99..f5ef3edeb2fd 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1468,7 +1468,7 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
if (unlikely(nr))
goto mr_put;
- nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_disk), c,
+ nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c,
req->mr->sig_attrs, ns->pi_type);
nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 1dfe9a3500e3..c7e3ec561ba0 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -9,13 +9,7 @@
int nvme_revalidate_zones(struct nvme_ns *ns)
{
- struct request_queue *q = ns->queue;
- int ret;
-
- ret = blk_revalidate_disk_zones(ns->disk, NULL);
- if (!ret)
- blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
- return ret;
+ return blk_revalidate_disk_zones(ns->disk, NULL);
}
static int nvme_set_max_append(struct nvme_ctrl *ctrl)
@@ -109,10 +103,11 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
goto free_data;
}
- q->limits.zoned = BLK_ZONED_HM;
+ blk_queue_set_zoned(ns->disk, BLK_ZONED_HM);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1);
blk_queue_max_active_zones(q, le32_to_cpu(id->mar) + 1);
+ blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
free_data:
kfree(id);
return status;
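Editor's note, not part of the patch: a condensed sketch of the zoned-queue setup sequence the hunk above now performs, including the max_zone_append limit that moved here from nvme_revalidate_zones(). The numeric limits are placeholder values.

#include <linux/blkdev.h>

/* Hypothetical setup: mark a disk host-managed and publish zone limits. */
static void example_setup_zoned_queue(struct gendisk *disk,
                                      struct request_queue *q)
{
        blk_queue_set_zoned(disk, BLK_ZONED_HM);
        blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
        blk_queue_max_open_zones(q, 128);           /* placeholder */
        blk_queue_max_active_zones(q, 128);         /* placeholder */
        blk_queue_max_zone_append_sectors(q, 1024); /* placeholder */
}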
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 125dde3f410e..bf6e0ac9ad28 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -333,7 +333,7 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
- if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL))
+ if (blkdev_issue_flush(req->ns->bdev))
return NVME_SC_INTERNAL | NVME_SC_DNR;
return 0;
}
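Editor's note, not part of the patch: a one-line sketch of the simplified flush API used above. blkdev_issue_flush() no longer takes a gfp mask and simply returns the flush status for the given block_device. example_flush_cache is hypothetical.

#include <linux/blkdev.h>

/* Hypothetical wrapper: flush the write-back cache of bdev. */
static int example_flush_cache(struct block_device *bdev)
{
        return blkdev_issue_flush(bdev);
}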
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index b9776fc8f08f..cbc88acdd233 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -275,7 +275,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
schedule_work(&req->p.work);
} else {
rq->end_io_data = req;
- blk_execute_rq_nowait(rq->q, ns ? ns->disk : NULL, rq, 0,
+ blk_execute_rq_nowait(ns ? ns->disk : NULL, rq, 0,
nvmet_passthru_req_done);
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index c7eb9a10c680..28c04a4efa66 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -428,23 +428,15 @@ static int dasd_state_unfmt_to_basic(struct dasd_device *device)
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
- struct gendisk *disk;
- struct disk_part_iter piter;
- struct block_device *part;
-
device->state = DASD_STATE_ONLINE;
if (device->block) {
dasd_schedule_block_bh(device->block);
if ((device->features & DASD_FEATURE_USERAW)) {
- disk = device->block->gdp;
- kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+ kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
+ KOBJ_CHANGE);
return 0;
}
- disk = device->block->bdev->bd_disk;
- disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
- while ((part = disk_part_iter_next(&piter)))
- kobject_uevent(bdev_kobj(part), KOBJ_CHANGE);
- disk_part_iter_exit(&piter);
+ disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
}
return 0;
}
@@ -455,9 +447,6 @@ dasd_state_ready_to_online(struct dasd_device * device)
static int dasd_state_online_to_ready(struct dasd_device *device)
{
int rc;
- struct gendisk *disk;
- struct disk_part_iter piter;
- struct block_device *part;
if (device->discipline->online_to_ready) {
rc = device->discipline->online_to_ready(device);
@@ -466,13 +455,8 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
}
device->state = DASD_STATE_READY;
- if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
- disk = device->block->bdev->bd_disk;
- disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
- while ((part = disk_part_iter_next(&piter)))
- kobject_uevent(bdev_kobj(part), KOBJ_CHANGE);
- disk_part_iter_exit(&piter);
- }
+ if (device->block && !(device->features & DASD_FEATURE_USERAW))
+ disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
return 0;
}
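Editor's note, not part of the patch: the replacement idiom used twice above. disk_uevent() emits the uevent for a disk and all of its partitions, so the open-coded disk_part_iter loop is no longer needed. example_notify_change is hypothetical.

#include <linux/genhd.h>
#include <linux/kobject.h>

/* Hypothetical helper: announce a device change for every partition. */
static void example_notify_change(struct gendisk *disk)
{
        disk_uevent(disk, KOBJ_CHANGE);
}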
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 299e77ec2c41..da33cb4cba28 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -879,17 +879,13 @@ dcssblk_submit_bio(struct bio *bio)
blk_queue_split(&bio);
bytes_done = 0;
- dev_info = bio->bi_disk->private_data;
+ dev_info = bio->bi_bdev->bd_disk->private_data;
if (dev_info == NULL)
goto fail;
if ((bio->bi_iter.bi_sector & 7) != 0 ||
(bio->bi_iter.bi_size & 4095) != 0)
/* Request is not page-aligned. */
goto fail;
- if (bio_end_sector(bio) > get_capacity(bio->bi_disk)) {
- /* Request beyond end of DCSS segment. */
- goto fail;
- }
/* verify data transfer direction */
if (dev_info->is_shared) {
switch (dev_info->segment_type) {
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index c2536f7767b3..d1ed39162943 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -184,7 +184,7 @@ static unsigned long xpram_highest_page_index(void)
*/
static blk_qc_t xpram_submit_bio(struct bio *bio)
{
- xpram_device_t *xdev = bio->bi_disk->private_data;
+ xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
struct bio_vec bvec;
struct bvec_iter iter;
unsigned int index;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index f11f51e2465f..c00f06e9ecb0 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2007,7 +2007,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
req->timeout = 10 * HZ;
rq->retries = 5;
- blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
+ blk_execute_rq_nowait(NULL, req, 1, eh_lock_door_done);
}
/**
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b3f14f05340a..4d2280658559 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -269,7 +269,7 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
/*
* head injection *required* here otherwise quiesce won't work
*/
- blk_execute_rq(req->q, NULL, req, 1);
+ blk_execute_rq(NULL, req, 1);
/*
* Some devices (USB mass-storage in particular) may transfer
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index cf07b7f93579..03adb39293c2 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -665,12 +665,28 @@ static int sd_zbc_init_disk(struct scsi_disk *sdkp)
return 0;
}
-void sd_zbc_release_disk(struct scsi_disk *sdkp)
+static void sd_zbc_clear_zone_info(struct scsi_disk *sdkp)
{
+ /* Serialize against revalidate zones */
+ mutex_lock(&sdkp->rev_mutex);
+
kvfree(sdkp->zones_wp_offset);
sdkp->zones_wp_offset = NULL;
kfree(sdkp->zone_wp_update_buf);
sdkp->zone_wp_update_buf = NULL;
+
+ sdkp->nr_zones = 0;
+ sdkp->rev_nr_zones = 0;
+ sdkp->zone_blocks = 0;
+ sdkp->rev_zone_blocks = 0;
+
+ mutex_unlock(&sdkp->rev_mutex);
+}
+
+void sd_zbc_release_disk(struct scsi_disk *sdkp)
+{
+ if (sd_is_zoned(sdkp))
+ sd_zbc_clear_zone_info(sdkp);
}
static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
@@ -769,6 +785,21 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
*/
return 0;
+ /* READ16/WRITE16 is mandatory for ZBC disks */
+ sdkp->device->use_16_for_rw = 1;
+ sdkp->device->use_10_for_rw = 0;
+
+ if (!blk_queue_is_zoned(q)) {
+ /*
+ * This can happen for a host aware disk with partitions.
+ * The block device zone information was already cleared
+ * by blk_queue_set_zoned(). Only clear the scsi disk zone
+ * information and exit early.
+ */
+ sd_zbc_clear_zone_info(sdkp);
+ return 0;
+ }
+
/* Check zoned block device characteristics (unconstrained reads) */
ret = sd_zbc_check_zoned_characteristics(sdkp, buf);
if (ret)
@@ -789,9 +820,13 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
blk_queue_max_active_zones(q, 0);
nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
- /* READ16/WRITE16 is mandatory for ZBC disks */
- sdkp->device->use_16_for_rw = 1;
- sdkp->device->use_10_for_rw = 0;
+ /*
+ * Per ZBC and ZAC specifications, writes in sequential write required
+ * zones of host-managed devices must be aligned to the device physical
+ * block size.
+ */
+ if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
+ blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
sdkp->rev_nr_zones = nr_zones;
sdkp->rev_zone_blocks = zone_blocks;
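Editor's note, not part of the patch: a minimal sketch of the new zone write granularity limit applied above, where host-managed devices advertise their physical block size as the required write alignment for sequential zones. example_set_zone_write_granularity is hypothetical.

#include <linux/blkdev.h>

/* Hypothetical helper: publish the write alignment of a host-managed queue. */
static void example_set_zone_write_granularity(struct request_queue *q,
                                               unsigned int phys_block_size)
{
        if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
                blk_queue_zone_write_granularity(q, phys_block_size);
}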
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index bfa8d77322d7..4383d93110f8 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -829,8 +829,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
- blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
- srp->rq, at_head, sg_rq_end_io);
+ blk_execute_rq_nowait(sdp->disk, srp->rq, at_head, sg_rq_end_io);
return 0;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 43f7624508a9..841ad2fc369a 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -585,7 +585,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
rq->retries = retries;
req->end_io_data = SRpnt;
- blk_execute_rq_nowait(req->q, NULL, req, 1, st_scsi_execute_end);
+ blk_execute_rq_nowait(NULL, req, 1, st_scsi_execute_end);
return 0;
}
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index b0cb5b95e892..cce455929778 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -241,6 +241,7 @@ struct target_core_file_cmd {
unsigned long len;
struct se_cmd *cmd;
struct kiocb iocb;
+ struct bio_vec bvecs[];
};
static void cmd_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -268,29 +269,22 @@ fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct target_core_file_cmd *aio_cmd;
struct iov_iter iter = {};
struct scatterlist *sg;
- struct bio_vec *bvec;
ssize_t len = 0;
int ret = 0, i;
- aio_cmd = kmalloc(sizeof(struct target_core_file_cmd), GFP_KERNEL);
+ aio_cmd = kmalloc(struct_size(aio_cmd, bvecs, sgl_nents), GFP_KERNEL);
if (!aio_cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
- if (!bvec) {
- kfree(aio_cmd);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
-
for_each_sg(sgl, sg, sgl_nents, i) {
- bvec[i].bv_page = sg_page(sg);
- bvec[i].bv_len = sg->length;
- bvec[i].bv_offset = sg->offset;
+ aio_cmd->bvecs[i].bv_page = sg_page(sg);
+ aio_cmd->bvecs[i].bv_len = sg->length;
+ aio_cmd->bvecs[i].bv_offset = sg->offset;
len += sg->length;
}
- iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
+ iov_iter_bvec(&iter, is_write, aio_cmd->bvecs, sgl_nents, len);
aio_cmd->cmd = cmd;
aio_cmd->len = len;
@@ -307,8 +301,6 @@ fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
else
ret = call_read_iter(file, &aio_cmd->iocb, &iter);
- kfree(bvec);
-
if (ret != -EIOCBQUEUED)
cmd_rw_aio_complete(&aio_cmd->iocb, ret, 0);
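Editor's note, not part of the patch: a small sketch of the allocation pattern adopted above. The bio_vec array becomes a flexible array member and is allocated together with its containing structure via struct_size(), removing the second kcalloc() and kfree(). struct example_cmd and example_cmd_alloc are hypothetical.

#include <linux/bvec.h>
#include <linux/fs.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_cmd {
        struct kiocb iocb;
        struct bio_vec bvecs[];   /* trailing flexible array */
};

/* Hypothetical allocator: one kmalloc covers the struct and nr_vecs bvecs. */
static struct example_cmd *example_cmd_alloc(unsigned int nr_vecs)
{
        struct example_cmd *cmd;

        cmd = kmalloc(struct_size(cmd, bvecs, nr_vecs), GFP_KERNEL);
        return cmd;
}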
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7994f27e4527..33770e5808ce 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1000,8 +1000,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
req->timeout = PS_TIMEOUT_OTHER;
scsi_req(req)->retries = PS_RETRY;
- blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
- (cmd->sam_task_attr == TCM_HEAD_TAG),
+ blk_execute_rq_nowait(NULL, req, (cmd->sam_task_attr == TCM_HEAD_TAG),
pscsi_req_done);
return 0;