author     Al Viro <viro@zeniv.linux.org.uk>    2017-09-24 19:09:18 +0200
committer  Al Viro <viro@zeniv.linux.org.uk>    2017-10-11 23:23:42 +0200
commit     d16d44ebb016792285ec1b9566dbd9d022ce70f9 (patch)
tree       2f500e04dfce8a3c7e652d04fb1cd8c906c7f4bf /block
parent     bio_map_user_iov(): get rid of copying iov_iter (diff)
bio_copy_user_iov(): saner bio size calculation
It's a bounce buffer; we don't *care* how badly the real source/destination is fragmented; all that matters is the total size.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
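Not part of the commit: a minimal userspace sketch of the reasoning above, assuming stand-in values for PAGE_SHIFT/PAGE_SIZE, a local DIV_ROUND_UP macro, a hypothetical three-segment iovec, and map_data->offset taken as 0. It contrasts the removed per-segment page count with the new total-size count; it is an illustration only, not kernel code.

/* Illustrative sketch: compare the removed per-segment page count
 * with the new total-size count introduced by this commit. */
#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical badly fragmented user buffer: three small segments. */
	char a[100], b[100], c[100];
	struct iovec iov[3] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
		{ .iov_base = c, .iov_len = sizeof(c) },
	};
	size_t len = 0, offset = 0;	/* map_data->offset assumed 0 here */
	int old_nr_pages = 0, new_nr_pages;

	/* Old scheme (removed by the patch): round each segment up to
	 * page boundaries and sum the per-segment page counts. */
	for (int i = 0; i < 3; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;
		unsigned long end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		old_nr_pages += end - start;
		len += iov[i].iov_len;
	}

	/* New scheme: the bounce buffer only has to hold offset + len bytes. */
	new_nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);

	printf("old nr_pages = %d, new nr_pages = %d (len = %zu)\n",
	       old_nr_pages, new_nr_pages, len);
	return 0;
}

With three 100-byte segments, the old loop typically reports 3 pages (each segment rounds up to at least one page) while the new calculation reports 1, since only the total size matters for a bounce buffer. The actual patch additionally clamps nr_pages to BIO_MAX_PAGES before calling bio_kmalloc(), as shown in the diff below.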
Diffstat (limited to 'block')
-rw-r--r--    block/bio.c    30
1 file changed, 6 insertions(+), 24 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 28f66e2edc53..e87f70cd528e 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1201,33 +1201,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
struct bio_map_data *bmd;
struct page *page;
struct bio *bio;
- int i, ret;
- int nr_pages = 0;
+ int i = 0, ret;
+ int nr_pages;
unsigned int len = iter->count;
unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
- for (i = 0; i < iter->nr_segs; i++) {
- unsigned long uaddr;
- unsigned long end;
- unsigned long start;
-
- uaddr = (unsigned long) iter->iov[i].iov_base;
- end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
- >> PAGE_SHIFT;
- start = uaddr >> PAGE_SHIFT;
-
- /*
- * Overflow, abort
- */
- if (end < start)
- return ERR_PTR(-EINVAL);
-
- nr_pages += end - start;
- }
-
- if (offset)
- nr_pages++;
-
bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
if (!bmd)
return ERR_PTR(-ENOMEM);
@@ -1242,6 +1220,10 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
bmd->iter = *iter;
bmd->iter.iov = bmd->iov;
+ nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+ if (nr_pages > BIO_MAX_PAGES)
+ nr_pages = BIO_MAX_PAGES;
+
ret = -ENOMEM;
bio = bio_kmalloc(gfp_mask, nr_pages);
if (!bio)