author     Linus Torvalds <torvalds@linux-foundation.org>  2018-01-29 20:51:49 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-01-29 20:51:49 +0100
commit     0a4b6e2f80aad46fb55a5cf7b1664c0aef030ee0 (patch)
tree       cefccd67dc1f27bb45830f6b8065dd4a1c05e83b /block/blk-core.c
parent     Merge tag 'edac_for_4.16' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp (diff)
parent     block: remove smart1,2.h (diff)
Merge branch 'for-4.16/block' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
 "This is the main pull request for block IO related changes for the
  4.16 kernel. Nothing major in this pull request, but a good amount of
  improvements and fixes all over the map. This contains:

   - BFQ improvements, fixes, and cleanups from Angelo, Chiara, and
     Paolo.

   - Support for SMR zones for deadline and mq-deadline from Damien and
     Christoph.

   - Set of fixes for bcache by way of Michael Lyle, including fixes
     from himself, Kent, Rui, Tang, and Coly.

   - Series from Matias for lightnvm with fixes from Hans Holmberg,
     Javier, and Matias. Mostly centered around pblk, and removing
     rrpc 1.2 in preparation for supporting 2.0.

   - A couple of NVMe pull requests from Christoph. Nothing major in
     here, just fixes and cleanups, and support for command tracing
     from Johannes.

   - Support for blk-throttle for tracking reads and writes
     separately. From Joseph Qi. A few cleanups/fixes also for
     blk-throttle from Weiping.

   - Series from Mike Snitzer that enables dm to register its queue
     more logically, something that's always been problematic on dm
     since it's a stacked device.

   - Series from Ming cleaning up some of the bio accessor use, in
     preparation for supporting multipage bvecs.

   - Various fixes from Ming closing up holes around queue mapping and
     quiescing.

   - BSD partition fix from Richard Narron, fixing a problem where we
     can't mount newer (10/11) FreeBSD partitions.

   - Series from Tejun reworking blk-mq timeout handling. The previous
     scheme relied on atomic bits, but it had races where we would
     think a request had timed out if it was reused at the wrong time.

   - null_blk now supports faking timeouts, to enable us to better
     exercise and test that functionality separately. From me.

   - Kill the separate atomic poll bit in the request struct. After
     this, we don't use the atomic bits on blk-mq anymore at all.
     From me.

   - sgl_alloc/free helpers from Bart.

   - Heavily contended tag case scalability improvement from me.

   - Various little fixes and cleanups from Arnd, Bart, Corentin,
     Douglas, Eryu, Goldwyn, and myself"

* 'for-4.16/block' of git://git.kernel.dk/linux-block: (186 commits)
  block: remove smart1,2.h
  nvme: add tracepoint for nvme_complete_rq
  nvme: add tracepoint for nvme_setup_cmd
  nvme-pci: introduce RECONNECTING state to mark initializing procedure
  nvme-rdma: remove redundant boolean for inline_data
  nvme: don't free uuid pointer before printing it
  nvme-pci: Suspend queues after deleting them
  bsg: use pr_debug instead of hand crafted macros
  blk-mq-debugfs: don't allow write on attributes with seq_operations set
  nvme-pci: Fix queue double allocations
  block: Set BIO_TRACE_COMPLETION on new bio during split
  blk-throttle: use queue_is_rq_based
  block: Remove kblockd_schedule_delayed_work{,_on}()
  blk-mq: Avoid that blk_mq_delay_run_hw_queue() introduces unintended delays
  blk-mq: Rename blk_mq_request_direct_issue() into blk_mq_request_issue_directly()
  lib/scatterlist: Fix chaining support in sgl_alloc_order()
  blk-throttle: track read and write request individually
  block: add bdev_read_only() checks to common helpers
  block: fail op_is_write() requests to read-only partitions
  blk-throttle: export io_serviced_recursive, io_service_bytes_recursive
  ...
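Of the items above, the sgl_alloc/free helpers from Bart are a small self-contained API: they allocate a scatterlist together with backing pages in a single call. A minimal usage sketch; the function name and the 64 KiB size are illustrative, not from this pull:

    #include <linux/scatterlist.h>
    #include <linux/sizes.h>

    static int example_use_sgl(void)
    {
            unsigned int nents;
            /* Allocate a scatterlist covering 64 KiB of newly
             * allocated pages; nents receives the entry count. */
            struct scatterlist *sgl = sgl_alloc(SZ_64K, GFP_KERNEL, &nents);

            if (!sgl)
                    return -ENOMEM;

            /* ... dma_map_sg() the list and hand it to hardware ... */

            sgl_free(sgl);  /* frees the pages and the list itself */
            return 0;
    }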
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c | 87
1 file changed, 52 insertions(+), 35 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3ba4326a63b5..a2005a485335 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -126,6 +126,8 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
rq->start_time = jiffies;
set_start_time_ns(rq);
rq->part = NULL;
+ seqcount_init(&rq->gstate_seq);
+ u64_stats_init(&rq->aborted_gstate_sync);
}
EXPORT_SYMBOL(blk_rq_init);
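The two new init calls belong to Tejun's blk-mq timeout rework mentioned in the pull message: gstate_seq is a seqcount protecting the request's generation/state word, and aborted_gstate_sync is a u64_stats_sync giving 32-bit machines a consistent read of the aborted generation. A rough sketch of the seqcount pattern, modeled loosely on that series (helper names and locking simplified; not the actual blk-mq code):

    #include <linux/blkdev.h>

    /* Writer: bump the sequence around the update so readers can
     * detect that they raced with a state change and retry. */
    static void gstate_write(struct request *rq, u64 new_gstate)
    {
            write_seqcount_begin(&rq->gstate_seq);
            WRITE_ONCE(rq->gstate, new_gstate);
            write_seqcount_end(&rq->gstate_seq);
    }

    /* Reader: loop until a stable snapshot is observed. */
    static u64 gstate_read(struct request *rq)
    {
            unsigned int seq;
            u64 val;

            do {
                    seq = read_seqcount_begin(&rq->gstate_seq);
                    val = READ_ONCE(rq->gstate);
            } while (read_seqcount_retry(&rq->gstate_seq, seq));

            return val;
    }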
@@ -699,6 +701,15 @@ void blk_cleanup_queue(struct request_queue *q)
queue_flag_set(QUEUE_FLAG_DEAD, q);
spin_unlock_irq(lock);
+ /*
+ * make sure all in-progress dispatches are completed, because
+ * blk_freeze_queue() can only complete all requests, and
+ * dispatch may still be in progress since we dispatch requests
+ * from more than one context
+ */
+ if (q->mq_ops)
+ blk_mq_quiesce_queue(q);
+
/* for synchronous bio-based driver finish in-flight integrity i/o */
blk_flush_integrity();
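blk_mq_quiesce_queue() waits until every in-flight dispatch (worker or direct issue) has finished and blocks new ones from starting, which is the guarantee blk_freeze_queue() alone does not provide. Outside of queue teardown the call is normally paired with blk_mq_unquiesce_queue(); a hedged sketch of that pattern in a driver, with a hypothetical reconfigure step:

    #include <linux/blk-mq.h>

    static void example_reconfigure(struct request_queue *q)
    {
            blk_mq_quiesce_queue(q);
            /* No ->queue_rq() invocation is in flight past this point,
             * so hardware state can be changed safely. */
            example_hw_reconfigure();       /* hypothetical driver step */
            blk_mq_unquiesce_queue(q);
    }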
@@ -1646,6 +1657,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
lockdep_assert_held(q->queue_lock);
+ blk_req_zone_write_unlock(req);
blk_pm_put_request(req);
elv_completed_request(q, req);
@@ -2055,6 +2067,21 @@ static inline bool should_fail_request(struct hd_struct *part,
#endif /* CONFIG_FAIL_MAKE_REQUEST */
+static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
+{
+ if (part->policy && op_is_write(bio_op(bio))) {
+ char b[BDEVNAME_SIZE];
+
+ printk(KERN_ERR
+ "generic_make_request: Trying to write "
+ "to read-only block-device %s (partno %d)\n",
+ bio_devname(bio, b), part->partno);
+ return true;
+ }
+
+ return false;
+}
+
/*
* Remap block n of partition p to block n+start(p) of the disk.
*/
@@ -2063,27 +2090,28 @@ static inline int blk_partition_remap(struct bio *bio)
struct hd_struct *p;
int ret = 0;
+ rcu_read_lock();
+ p = __disk_get_part(bio->bi_disk, bio->bi_partno);
+ if (unlikely(!p || should_fail_request(p, bio->bi_iter.bi_size) ||
+ bio_check_ro(bio, p))) {
+ ret = -EIO;
+ goto out;
+ }
+
/*
* Zone reset does not include bi_size so bio_sectors() is always 0.
* Include a test for the reset op code and perform the remap if needed.
*/
- if (!bio->bi_partno ||
- (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET))
- return 0;
+ if (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET)
+ goto out;
- rcu_read_lock();
- p = __disk_get_part(bio->bi_disk, bio->bi_partno);
- if (likely(p && !should_fail_request(p, bio->bi_iter.bi_size))) {
- bio->bi_iter.bi_sector += p->start_sect;
- bio->bi_partno = 0;
- trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
- bio->bi_iter.bi_sector - p->start_sect);
- } else {
- printk("%s: fail for partition %d\n", __func__, bio->bi_partno);
- ret = -EIO;
- }
- rcu_read_unlock();
+ bio->bi_iter.bi_sector += p->start_sect;
+ bio->bi_partno = 0;
+ trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
+ bio->bi_iter.bi_sector - p->start_sect);
+out:
+ rcu_read_unlock();
return ret;
}
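The remap is exactly the arithmetic from the comment above: block n of partition p becomes block n + start(p) on the whole disk, and bi_partno is cleared so the bio is addressed relative to the disk from then on. Note that the trace point recovers the original partition-relative sector by subtracting start_sect back out. With made-up numbers:

    /* Illustration only: a bio for sector 100 of a partition whose
     * start_sect is 2048 leaves blk_partition_remap() as:
     *
     *   bio->bi_iter.bi_sector == 100 + 2048 == 2148  (disk-relative)
     *   bio->bi_partno         == 0
     *
     * while trace_block_bio_remap() still logs sector 100.
     */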
@@ -2142,15 +2170,19 @@ generic_make_request_checks(struct bio *bio)
* For a REQ_NOWAIT based request, return -EOPNOTSUPP
* if queue is not a request based queue.
*/
-
if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
goto not_supported;
if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
goto end_io;
- if (blk_partition_remap(bio))
- goto end_io;
+ if (!bio->bi_partno) {
+ if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
+ goto end_io;
+ } else {
+ if (blk_partition_remap(bio))
+ goto end_io;
+ }
if (bio_check_eod(bio, nr_sectors))
goto end_io;
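Both branches above funnel through bio_check_ro(): bios aimed at the whole device are checked against part0 directly, while partition bios are checked inside blk_partition_remap(). The policy flag the helper consults is the per-partition read-only state that userspace toggles with the BLKROSET ioctl (what blockdev --setro issues); a hedged userspace sketch, with an illustrative device path:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    /* Mark a device or partition read-only; writes submitted after
     * this are failed by the bio_check_ro() path above. */
    static int set_readonly(const char *dev)  /* e.g. "/dev/sda1" */
    {
            int ro = 1;
            int fd = open(dev, O_RDONLY);

            if (fd < 0)
                    return -1;
            if (ioctl(fd, BLKROSET, &ro) < 0) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }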
@@ -2493,8 +2525,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
* bypass a potential scheduler on the bottom device for
* insert.
*/
- blk_mq_request_bypass_insert(rq, true);
- return BLK_STS_OK;
+ return blk_mq_request_issue_directly(rq);
}
spin_lock_irqsave(q->queue_lock, flags);
@@ -2846,7 +2877,7 @@ void blk_start_request(struct request *req)
wbt_issue(req->q->rq_wb, &req->issue_stat);
}
- BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
+ BUG_ON(blk_rq_is_complete(req));
blk_add_timer(req);
}
EXPORT_SYMBOL(blk_start_request);
@@ -3415,20 +3446,6 @@ int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
-int kblockd_schedule_delayed_work(struct delayed_work *dwork,
- unsigned long delay)
-{
- return queue_delayed_work(kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
-int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
- unsigned long delay)
-{
- return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
-
/**
* blk_start_plug - initialize blk_plug and track it inside the task_struct
* @plug: The &struct blk_plug that needs to be initialized
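The hunk above also removes kblockd_schedule_delayed_work{,_on}(); kblockd_mod_delayed_work_on() is the surviving delayed-work entry point on kblockd. A caller of the removed helper would migrate roughly as follows (sketch; dwork and delay stand for the caller's own work item and timeout), keeping in mind that mod_delayed_work() semantics re-arm a pending timer instead of leaving it untouched:

    /* Before (helper removed by this series): */
    kblockd_schedule_delayed_work(dwork, delay);

    /* After: same kblockd workqueue, but mod_delayed_work()
     * semantics, queued on no particular CPU. */
    kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, dwork, delay);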