Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/apple.c          |  5
-rw-r--r--  drivers/nvme/host/core.c           |  5
-rw-r--r--  drivers/nvme/host/fc.c             | 18
-rw-r--r--  drivers/nvme/host/ioctl.c          |  4
-rw-r--r--  drivers/nvme/host/multipath.c      |  4
-rw-r--r--  drivers/nvme/host/nvme.h           |  4
-rw-r--r--  drivers/nvme/host/pci.c            |  4
-rw-r--r--  drivers/nvme/host/rdma.c           | 15
-rw-r--r--  drivers/nvme/host/tcp.c            | 15
-rw-r--r--  drivers/nvme/host/zns.c            |  6
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c  | 17
-rw-r--r--  drivers/nvme/target/loop.c         | 12
-rw-r--r--  drivers/nvme/target/zns.c          | 24
13 files changed, 64 insertions(+), 69 deletions(-)
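The hunks below track the block-layer API rework merged around Linux 5.19/6.0: queue teardown moves from blk_cleanup_queue()/blk_cleanup_disk() to blk_mq_destroy_queue()/put_disk(), the unused "bool reserved" argument is dropped from blk_mq_ops->timeout and the tag-iterator callbacks, request flags gain the dedicated blk_opf_t/enum req_op types, and zoned-device state moves from struct request_queue to struct gendisk. A minimal sketch of the new timeout-callback shape follows; the mydrv_* names are hypothetical and not part of this patch:

#include <linux/blk-mq.h>

/* Hypothetical per-request driver state kept in the blk-mq PDU. */
struct mydrv_iod {
        int retries;
};

/*
 * New-style timeout handler: the "bool reserved" parameter is gone.
 * A driver that still needs it can test the request itself (e.g. with
 * blk_mq_is_reserved_rq()) instead of receiving the flag.
 */
static enum blk_eh_timer_return mydrv_timeout(struct request *req)
{
        struct mydrv_iod *iod = blk_mq_rq_to_pdu(req);

        if (iod->retries++ < 3)
                return BLK_EH_RESET_TIMER;      /* let the command run longer */

        blk_mq_complete_request(req);           /* fail it via ->complete */
        return BLK_EH_DONE;
}

static const struct blk_mq_ops mydrv_mq_ops = {
        /* .queue_rq and friends elided */
        .timeout        = mydrv_timeout,
};

This is the shape every driver-side timeout in the diff below converges on.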
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index d702d7d60235..5c352d5d8ee6 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -862,8 +862,7 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
         }
 }
 
-static enum blk_eh_timer_return apple_nvme_timeout(struct request *req,
-                bool reserved)
+static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
 {
         struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
         struct apple_nvme_queue *q = iod->q;
@@ -1502,7 +1501,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
 
         if (!blk_get_queue(anv->ctrl.admin_q)) {
                 nvme_start_admin_queue(&anv->ctrl);
-                blk_cleanup_queue(anv->ctrl.admin_q);
+                blk_mq_destroy_queue(anv->ctrl.admin_q);
                 anv->ctrl.admin_q = NULL;
                 ret = -ENODEV;
                 goto put_dev;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 6a12a906a11e..2533b88e66d5 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -418,7 +418,7 @@ blk_status_t nvme_host_path_error(struct request *req)
 }
 EXPORT_SYMBOL_GPL(nvme_host_path_error);
 
-bool nvme_cancel_request(struct request *req, void *data, bool reserved)
+bool nvme_cancel_request(struct request *req, void *data)
 {
         dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
                                 "Cancelling I/O %d", req->tag);
@@ -4061,7 +4061,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
         mutex_unlock(&ctrl->subsys->lock);
         nvme_put_ns_head(ns->head);
 out_cleanup_disk:
-        blk_cleanup_disk(disk);
+        put_disk(disk);
 out_free_ns:
         kfree(ns);
 out_free_id:
@@ -4103,7 +4103,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
         if (!nvme_ns_head_multipath(ns->head))
                 nvme_cdev_del(&ns->cdev, &ns->cdev_device);
         del_gendisk(ns->disk);
-        blk_cleanup_queue(ns->queue);
 
         down_write(&ns->ctrl->namespaces_rwsem);
         list_del_init(&ns->list);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 3c778bb0c294..9987797620b6 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2392,7 +2392,7 @@ nvme_fc_ctrl_free(struct kref *ref)
         unsigned long flags;
 
         if (ctrl->ctrl.tagset) {
-                blk_cleanup_queue(ctrl->ctrl.connect_q);
+                blk_mq_destroy_queue(ctrl->ctrl.connect_q);
                 blk_mq_free_tag_set(&ctrl->tag_set);
         }
 
@@ -2402,8 +2402,8 @@ nvme_fc_ctrl_free(struct kref *ref)
         spin_unlock_irqrestore(&ctrl->rport->lock, flags);
 
         nvme_start_admin_queue(&ctrl->ctrl);
-        blk_cleanup_queue(ctrl->ctrl.admin_q);
-        blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+        blk_mq_destroy_queue(ctrl->ctrl.admin_q);
+        blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
         blk_mq_free_tag_set(&ctrl->admin_tag_set);
 
         kfree(ctrl->queues);
@@ -2456,8 +2456,7 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
  * status. The done path will return the io request back to the block
  * layer with an error status.
  */
-static bool
-nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+static bool nvme_fc_terminate_exchange(struct request *req, void *data)
 {
         struct nvme_ctrl *nctrl = data;
         struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
@@ -2565,8 +2564,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
         nvme_reset_ctrl(&ctrl->ctrl);
 }
 
-static enum blk_eh_timer_return
-nvme_fc_timeout(struct request *rq, bool reserved)
+static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
 {
         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
         struct nvme_fc_ctrl *ctrl = op->ctrl;
@@ -2953,7 +2951,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 out_delete_hw_queues:
         nvme_fc_delete_hw_io_queues(ctrl);
 out_cleanup_blk_queue:
-        blk_cleanup_queue(ctrl->ctrl.connect_q);
+        blk_mq_destroy_queue(ctrl->ctrl.connect_q);
 out_free_tag_set:
         blk_mq_free_tag_set(&ctrl->tag_set);
         nvme_fc_free_io_queues(ctrl);
@@ -3642,9 +3640,9 @@ fail_ctrl:
         return ERR_PTR(-EIO);
 
 out_cleanup_admin_q:
-        blk_cleanup_queue(ctrl->ctrl.admin_q);
+        blk_mq_destroy_queue(ctrl->ctrl.admin_q);
 out_cleanup_fabrics_q:
-        blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+        blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
 out_free_admin_tag_set:
         blk_mq_free_tag_set(&ctrl->admin_tag_set);
 out_free_queues:
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index a2e89db1cd63..27614bee7380 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -68,7 +68,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
                 struct nvme_command *cmd, void __user *ubuffer,
                 unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
                 u32 meta_seed, void **metap, unsigned timeout, bool vec,
-                unsigned int rq_flags, blk_mq_req_flags_t blk_flags)
+                blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags)
 {
         bool write = nvme_is_write(cmd);
         struct nvme_ns *ns = q->queuedata;
@@ -407,7 +407,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
         struct nvme_uring_data d;
         struct nvme_command c;
         struct request *req;
-        unsigned int rq_flags = 0;
+        blk_opf_t rq_flags = 0;
         blk_mq_req_flags_t blk_flags = 0;
         void *meta = NULL;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index d3e2440d8abb..f26640ccb955 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -830,7 +830,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
                            ns->head->disk->queue);
 #ifdef CONFIG_BLK_DEV_ZONED
         if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
-                ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
+                ns->head->disk->nr_zones = ns->disk->nr_zones;
 #endif
 }
 
@@ -853,7 +853,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
         /* make sure all pending bios are cleaned up */
         kblockd_schedule_work(&head->requeue_work);
         flush_work(&head->requeue_work);
-        blk_cleanup_disk(head->disk);
+        put_disk(head->disk);
 }
 
 void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 5558f8812157..7e0a925bf3be 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -698,7 +698,7 @@ static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
 }
 
 blk_status_t nvme_host_path_error(struct request *req);
-bool nvme_cancel_request(struct request *req, void *data, bool reserved);
+bool nvme_cancel_request(struct request *req, void *data);
 void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
@@ -734,7 +734,7 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl);
 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
 void nvme_start_freeze(struct nvme_ctrl *ctrl);
 
-static inline unsigned int nvme_req_op(struct nvme_command *cmd)
+static inline enum req_op nvme_req_op(struct nvme_command *cmd)
 {
         return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
 }
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 73d9fcba3b1c..7e7d4802ac6b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1344,7 +1344,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
                  "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
 }
 
-static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
+static enum blk_eh_timer_return nvme_timeout(struct request *req)
 {
         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
         struct nvme_queue *nvmeq = iod->nvmeq;
@@ -1760,7 +1760,7 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev)
                  * queue to flush these to completion.
                  */
                 nvme_start_admin_queue(&dev->ctrl);
-                blk_cleanup_queue(dev->ctrl.admin_q);
+                blk_mq_destroy_queue(dev->ctrl.admin_q);
                 blk_mq_free_tag_set(&dev->admin_tagset);
         }
 }
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 46c2dcf72f7e..4665aebd944d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -840,8 +840,8 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
                 bool remove)
 {
         if (remove) {
-                blk_cleanup_queue(ctrl->ctrl.admin_q);
-                blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+                blk_mq_destroy_queue(ctrl->ctrl.admin_q);
+                blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
                 blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
         }
         if (ctrl->async_event_sqe.data) {
@@ -935,10 +935,10 @@ out_stop_queue:
         nvme_cancel_admin_tagset(&ctrl->ctrl);
 out_cleanup_queue:
         if (new)
-                blk_cleanup_queue(ctrl->ctrl.admin_q);
+                blk_mq_destroy_queue(ctrl->ctrl.admin_q);
 out_cleanup_fabrics_q:
         if (new)
-                blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+                blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
 out_free_tagset:
         if (new)
                 blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
@@ -957,7 +957,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
                 bool remove)
 {
         if (remove) {
-                blk_cleanup_queue(ctrl->ctrl.connect_q);
+                blk_mq_destroy_queue(ctrl->ctrl.connect_q);
                 blk_mq_free_tag_set(ctrl->ctrl.tagset);
         }
         nvme_rdma_free_io_queues(ctrl);
@@ -1012,7 +1012,7 @@ out_wait_freeze_timed_out:
 out_cleanup_connect_q:
         nvme_cancel_tagset(&ctrl->ctrl);
         if (new)
-                blk_cleanup_queue(ctrl->ctrl.connect_q);
+                blk_mq_destroy_queue(ctrl->ctrl.connect_q);
 out_free_tag_set:
         if (new)
                 blk_mq_free_tag_set(ctrl->ctrl.tagset);
@@ -2021,8 +2021,7 @@ static void nvme_rdma_complete_timed_out(struct request *rq)
         nvmf_complete_timed_out_request(rq);
 }
 
-static enum blk_eh_timer_return
-nvme_rdma_timeout(struct request *rq, bool reserved)
+static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
 {
         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
         struct nvme_rdma_queue *queue = req->queue;
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 7a9e6ffa2342..b95ee85053e3 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1884,7 +1884,7 @@ static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
 {
         nvme_tcp_stop_io_queues(ctrl);
         if (remove) {
-                blk_cleanup_queue(ctrl->connect_q);
+                blk_mq_destroy_queue(ctrl->connect_q);
                 blk_mq_free_tag_set(ctrl->tagset);
         }
         nvme_tcp_free_io_queues(ctrl);
@@ -1939,7 +1939,7 @@ out_wait_freeze_timed_out:
 out_cleanup_connect_q:
         nvme_cancel_tagset(ctrl);
         if (new)
-                blk_cleanup_queue(ctrl->connect_q);
+                blk_mq_destroy_queue(ctrl->connect_q);
 out_free_tag_set:
         if (new)
                 blk_mq_free_tag_set(ctrl->tagset);
@@ -1952,8 +1952,8 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
 {
         nvme_tcp_stop_queue(ctrl, 0);
         if (remove) {
-                blk_cleanup_queue(ctrl->admin_q);
-                blk_cleanup_queue(ctrl->fabrics_q);
+                blk_mq_destroy_queue(ctrl->admin_q);
+                blk_mq_destroy_queue(ctrl->fabrics_q);
                 blk_mq_free_tag_set(ctrl->admin_tagset);
         }
         nvme_tcp_free_admin_queue(ctrl);
@@ -2011,10 +2011,10 @@ out_stop_queue:
         nvme_cancel_admin_tagset(ctrl);
 out_cleanup_queue:
         if (new)
-                blk_cleanup_queue(ctrl->admin_q);
+                blk_mq_destroy_queue(ctrl->admin_q);
 out_cleanup_fabrics_q:
         if (new)
-                blk_cleanup_queue(ctrl->fabrics_q);
+                blk_mq_destroy_queue(ctrl->fabrics_q);
 out_free_tagset:
         if (new)
                 blk_mq_free_tag_set(ctrl->admin_tagset);
@@ -2323,8 +2323,7 @@ static void nvme_tcp_complete_timed_out(struct request *rq)
         nvmf_complete_timed_out_request(rq);
 }
 
-static enum blk_eh_timer_return
-nvme_tcp_timeout(struct request *rq, bool reserved)
+static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
 {
         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
         struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 9f81beb4df4e..12316ab51bda 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -109,10 +109,10 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
                 goto free_data;
         }
 
-        blk_queue_set_zoned(ns->disk, BLK_ZONED_HM);
+        disk_set_zoned(ns->disk, BLK_ZONED_HM);
         blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
-        blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1);
-        blk_queue_max_active_zones(q, le32_to_cpu(id->mar) + 1);
+        disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1);
+        disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1);
 free_data:
         kfree(id);
         return status;
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 27a72504d31c..2dc1c1035626 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -246,7 +246,8 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
         struct scatterlist *sg;
         struct blk_plug plug;
         sector_t sector;
-        int op, i, rc;
+        blk_opf_t opf;
+        int i, rc;
         struct sg_mapping_iter prot_miter;
         unsigned int iter_flags;
         unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;
@@ -260,26 +261,26 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
         }
 
         if (req->cmd->rw.opcode == nvme_cmd_write) {
-                op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
+                opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
                 if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
-                        op |= REQ_FUA;
+                        opf |= REQ_FUA;
                 iter_flags = SG_MITER_TO_SG;
         } else {
-                op = REQ_OP_READ;
+                opf = REQ_OP_READ;
                 iter_flags = SG_MITER_FROM_SG;
         }
 
         if (is_pci_p2pdma_page(sg_page(req->sg)))
-                op |= REQ_NOMERGE;
+                opf |= REQ_NOMERGE;
 
         sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
 
         if (nvmet_use_inline_bvec(req)) {
                 bio = &req->b.inline_bio;
                 bio_init(bio, req->ns->bdev, req->inline_bvec,
-                         ARRAY_SIZE(req->inline_bvec), op);
+                         ARRAY_SIZE(req->inline_bvec), opf);
         } else {
-                bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), op,
+                bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,
                                 GFP_KERNEL);
         }
         bio->bi_iter.bi_sector = sector;
@@ -306,7 +307,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
                         }
 
                         bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
-                                        op, GFP_KERNEL);
+                                        opf, GFP_KERNEL);
                         bio->bi_iter.bi_sector = sector;
 
                         bio_chain(bio, prev);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 59024af2da2e..0f5c77e22a0a 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -266,8 +266,8 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
         if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
                 return;
         nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
-        blk_cleanup_queue(ctrl->ctrl.admin_q);
-        blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+        blk_mq_destroy_queue(ctrl->ctrl.admin_q);
+        blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
         blk_mq_free_tag_set(&ctrl->admin_tag_set);
 }
 
@@ -283,7 +283,7 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
         mutex_unlock(&nvme_loop_ctrl_mutex);
 
         if (nctrl->tagset) {
-                blk_cleanup_queue(ctrl->ctrl.connect_q);
+                blk_mq_destroy_queue(ctrl->ctrl.connect_q);
                 blk_mq_free_tag_set(&ctrl->tag_set);
         }
         kfree(ctrl->queues);
@@ -410,9 +410,9 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 
 out_cleanup_queue:
         clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
-        blk_cleanup_queue(ctrl->ctrl.admin_q);
+        blk_mq_destroy_queue(ctrl->ctrl.admin_q);
 out_cleanup_fabrics_q:
-        blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+        blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
 out_free_tagset:
         blk_mq_free_tag_set(&ctrl->admin_tag_set);
 out_free_sq:
@@ -554,7 +554,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
         return 0;
 
 out_cleanup_connect_q:
-        blk_cleanup_queue(ctrl->ctrl.connect_q);
+        blk_mq_destroy_queue(ctrl->ctrl.connect_q);
 out_free_tagset:
         blk_mq_free_tag_set(&ctrl->tag_set);
 out_destroy_queues:
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 82b61acf7a72..c7ef69f29fe4 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -57,10 +57,10 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
          * zones, reject the device. Otherwise, use report zones to detect if
          * the device has conventional zones.
          */
-        if (ns->bdev->bd_disk->queue->conv_zones_bitmap)
+        if (ns->bdev->bd_disk->conv_zones_bitmap)
                 return false;
 
-        ret = blkdev_report_zones(ns->bdev, 0, blkdev_nr_zones(ns->bdev->bd_disk),
+        ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev),
                                   validate_conv_zones_cb, NULL);
         if (ret < 0)
                 return false;
@@ -241,7 +241,7 @@ static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
 {
         unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
 
-        return blkdev_nr_zones(req->ns->bdev->bd_disk) -
+        return bdev_nr_zones(req->ns->bdev) -
                 (sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
 }
 
@@ -308,7 +308,7 @@ void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
         queue_work(zbd_wq, &req->z.zmgmt_work);
 }
 
-static inline enum req_opf zsa_req_op(u8 zsa)
+static inline enum req_op zsa_req_op(u8 zsa)
 {
         switch (zsa) {
         case NVME_ZONE_OPEN:
@@ -386,7 +386,7 @@ static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
 static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
 {
         struct block_device *bdev = req->ns->bdev;
-        unsigned int nr_zones = blkdev_nr_zones(bdev->bd_disk);
+        unsigned int nr_zones = bdev_nr_zones(bdev);
         struct request_queue *q = bdev_get_queue(bdev);
         struct bio *bio = NULL;
         sector_t sector = 0;
@@ -413,8 +413,8 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
                 ret = 0;
         }
 
-        while (sector < get_capacity(bdev->bd_disk)) {
-                if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
+        while (sector < bdev_nr_sectors(bdev)) {
+                if (test_bit(disk_zone_no(bdev->bd_disk, sector), d.zbitmap)) {
                         bio = blk_next_bio(bio, bdev, 0,
                                            zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
                                            GFP_KERNEL);
@@ -422,7 +422,7 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
                         /* This may take a while, so be nice to others */
                         cond_resched();
                 }
-                sector += blk_queue_zone_sectors(q);
+                sector += bdev_zone_sectors(bdev);
         }
 
         if (bio) {
@@ -465,7 +465,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
 {
         struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
         sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
-        enum req_opf op = zsa_req_op(req->cmd->zms.zsa);
+        enum req_op op = zsa_req_op(req->cmd->zms.zsa);
         struct block_device *bdev = req->ns->bdev;
         sector_t zone_sectors = bdev_zone_sectors(bdev);
         u16 status = NVME_SC_SUCCESS;
@@ -525,7 +525,7 @@ static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
 void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
 {
         sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
-        const unsigned int op = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
+        const blk_opf_t opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
         u16 status = NVME_SC_SUCCESS;
         unsigned int total_len = 0;
         struct scatterlist *sg;
@@ -556,9 +556,9 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
         if (nvmet_use_inline_bvec(req)) {
                 bio = &req->z.inline_bio;
                 bio_init(bio, req->ns->bdev, req->inline_bvec,
-                         ARRAY_SIZE(req->inline_bvec), op);
+                         ARRAY_SIZE(req->inline_bvec), opf);
         } else {
-                bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL);
+                bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL);
         }
         bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
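For the type conversions above, an operation and its modifier flags now travel in one typed word (blk_opf_t) rather than a plain int, so sparse can flag mixups with the bare enum req_op. A small sketch under those assumptions, mirroring the nvmet_bdev_execute_rw() and target/zns.c changes; the example_* helpers are illustrative and not part of this patch:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Build a typed operation word: REQ_OP_* picks the op, REQ_* bits modify it. */
static struct bio *example_alloc_write_bio(struct block_device *bdev,
                                           sector_t sector, bool fua)
{
        blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
        struct bio *bio;

        if (fua)
                opf |= REQ_FUA;         /* mirrors the NVME_RW_FUA handling */

        /* bio_alloc() accepts the combined blk_opf_t directly. */
        bio = bio_alloc(bdev, 1, opf, GFP_KERNEL);
        bio->bi_iter.bi_sector = sector;
        return bio;
}

/*
 * Zone queries likewise take the block_device now: bdev_nr_zones() and
 * bdev_zone_sectors() replace the gendisk/queue-based helpers seen in
 * the target/zns.c hunks above.
 */
static unsigned int example_nr_zones(struct block_device *bdev)
{
        return bdev_nr_zones(bdev);
}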