| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-12-13 23:27:19 +0100 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-12-13 23:27:19 +0100 |
| commit | f1fcd7786ec8e316b69860ab856f29f346a9b301 (patch) | |
| tree | e913c7a36d17e6aad5e451fffa37e4a3d51c7115 /block | |
| parent | Merge tag 'io_uring-5.5-20191212' of git://git.kernel.dk/linux-block (diff) | |
| parent | blk-cgroup: remove blkcg_drain_queue (diff) | |
Merge tag 'for-linus-20191212' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
- stable fix for the bi_size overflow. Not a corruption issue, but a
  case where we could merge but disallowed (Andreas). A minimal sketch of
  the guard pattern follows this list.
- NVMe pull request via Keith, with various fixes.
- MD pull request from Song.
- Merge window regression fix for the rq passthrough stats (Logan)
- Remove unused blkcg_drain_queue() function (Guoqing)
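
The bi_size item above is the "block: fix 'check bi_size overflow before merge'" change shown in the diff below: rather than refusing a merge whenever the bio looks full, the overflow test is performed only once the page is known to be mergeable into the last bvec, and it rejects only when the 32-bit size counter would actually wrap. Here is a minimal user-space sketch of that guard pattern; the names (`struct segvec`, `try_merge_tail`, and so on) are illustrative stand-ins, not kernel APIs.

```c
/*
 * Sketch of the overflow-guard ordering used by the bi_size fix:
 * only reject a tail merge when the 32-bit running total would wrap,
 * not merely because the segment vector has no free slots.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t off; uint32_t len; };

struct segvec {
	struct seg	segs[4];
	unsigned int	nr;	/* segments in use, like bio->bi_vcnt */
	uint32_t	total;	/* like bio->bi_iter.bi_size */
};

/* Append to the last segment if the new range is physically contiguous. */
static bool try_merge_tail(struct segvec *v, uint64_t off, uint32_t len)
{
	if (v->nr == 0)
		return false;

	struct seg *last = &v->segs[v->nr - 1];

	if (last->off + last->len != off)	/* not contiguous: no merge */
		return false;
	if (v->total > UINT32_MAX - len)	/* 32-bit total would wrap */
		return false;

	last->len += len;
	v->total += len;
	return true;
}

int main(void)
{
	struct segvec v = { .segs = { { 0, 512 } }, .nr = 1, .total = 512 };

	/* Contiguous append succeeds and grows the total. */
	printf("merge: %d, total %u\n", try_merge_tail(&v, 512, 512),
	       (unsigned int)v.total);

	/* A merge that would overflow the 32-bit total is refused. */
	v.total = UINT32_MAX - 100;
	printf("merge near UINT32_MAX: %d\n", try_merge_tail(&v, 1024, 512));
	return 0;
}
```

The point of the ordering is that a contiguous tail merge consumes no new vector slot, so running out of slots is not by itself a reason to refuse it; only wrapping the 32-bit total is.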
* tag 'for-linus-20191212' of git://git.kernel.dk/linux-block:
blk-cgroup: remove blkcg_drain_queue
block: fix NULL pointer dereference in account statistics with IDE
md: make sure desc_nr less than MD_SB_DISKS
md: raid1: check rdev before reference in raid1_sync_request func
raid5: need to set STRIPE_HANDLE for batch head
block: fix "check bi_size overflow before merge"
nvme/pci: Fix read queue count
nvme/pci: Limit write queue sizes to possible cpus
nvme/pci: Fix write and poll queue types
nvme/pci: Remove last_cq_head
nvme: Namepace identification descriptor list is optional
nvme-fc: fix double-free scenarios on hw queues
nvme: else following return is not needed
nvme: add error message on mismatching controller ids
nvme_fc: add module to ops template to allow module references
nvmet-loop: Avoid preallocating big SGL for data
nvme-fc: Avoid preallocating big SGL for data
nvme-rdma: Avoid preallocating big SGL for data
Diffstat (limited to 'block')
| -rw-r--r-- | block/bio.c | 4 |
| -rw-r--r-- | block/blk-cgroup.c | 20 |
| -rw-r--r-- | block/blk-core.c | 5 |
3 files changed, 6 insertions, 23 deletions
```diff
diff --git a/block/bio.c b/block/bio.c
index 9d54aa37ce6c..a5d75f6bf4c7 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -754,10 +754,12 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 		return false;
 
-	if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
+	if (bio->bi_vcnt > 0) {
 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
 		if (page_is_mergeable(bv, page, len, off, same_page)) {
+			if (bio->bi_iter.bi_size > UINT_MAX - len)
+				return false;
 			bv->bv_len += len;
 			bio->bi_iter.bi_size += len;
 			return true;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 708dea92dac8..a229b94d5390 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1062,26 +1062,6 @@ err_unlock:
 }
 
 /**
- * blkcg_drain_queue - drain blkcg part of request_queue
- * @q: request_queue to drain
- *
- * Called from blk_drain_queue(). Responsible for draining blkcg part.
- */
-void blkcg_drain_queue(struct request_queue *q)
-{
-	lockdep_assert_held(&q->queue_lock);
-
-	/*
-	 * @q could be exiting and already have destroyed all blkgs as
-	 * indicated by NULL root_blkg. If so, don't confuse policies.
-	 */
-	if (!q->root_blkg)
-		return;
-
-	blk_throtl_drain(q);
-}
-
-/**
  * blkcg_exit_queue - exit and release blkcg part of request_queue
  * @q: request_queue being released
  *
diff --git a/block/blk-core.c b/block/blk-core.c
index e4b27f7e9f51..e0a094fddee5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1310,7 +1310,7 @@ EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
 
 void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
-	if (blk_do_io_stat(req)) {
+	if (req->part && blk_do_io_stat(req)) {
 		const int sgrp = op_stat_group(req_op(req));
 		struct hd_struct *part;
 
@@ -1328,7 +1328,8 @@ void blk_account_io_done(struct request *req, u64 now)
 	 * normal IO on queueing nor completion. Accounting the
 	 * containing request is enough.
 	 */
-	if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
+	if (req->part && blk_do_io_stat(req) &&
+	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
 		const int sgrp = op_stat_group(req_op(req));
 		struct hd_struct *part;
 
```
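
The blk-core.c hunks are the accounting fix from the shortlog ("block: fix NULL pointer dereference in account statistics with IDE"): requests that never had a partition attached now skip statistics instead of dereferencing a NULL `->part`. Below is a stand-alone sketch of the same guard, using stand-in types rather than the kernel's `request` and `hd_struct`.

```c
/*
 * Sketch of the "check the partition pointer first" guard from the
 * blk-core.c change: statistics are only accounted when a partition is
 * attached to the request, so passthrough-style requests that never set
 * ->part cannot trip a NULL dereference.  All types here are stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct part_stats { unsigned long ios; unsigned long sectors; };
struct partition  { struct part_stats stats; };

struct request {
	struct partition *part;		/* NULL when no disk/partition is attached */
	unsigned int	  stats_enabled;
	unsigned int	  data_len;
};

static bool do_io_stat(const struct request *rq)
{
	return rq->stats_enabled;
}

static void account_io_completion(struct request *rq, unsigned int bytes)
{
	/* Guard on rq->part first, mirroring "req->part && blk_do_io_stat(req)". */
	if (rq->part && do_io_stat(rq)) {
		rq->part->stats.ios++;
		rq->part->stats.sectors += bytes >> 9;
	}
}

int main(void)
{
	struct partition p = { { 0, 0 } };
	struct request with_part = { .part = &p,   .stats_enabled = 1, .data_len = 4096 };
	struct request no_part   = { .part = NULL, .stats_enabled = 1, .data_len = 4096 };

	account_io_completion(&with_part, with_part.data_len);
	account_io_completion(&no_part, no_part.data_len);	/* safely skipped */

	printf("ios=%lu sectors=%lu\n", p.stats.ios, p.stats.sectors);
	return 0;
}
```

Checking the pointer before the stats predicate keeps the common path unchanged and lets the no-partition case fall through without touching any per-partition counters.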