author     NeilBrown <neilb@suse.de>  2011-07-28 03:32:10 +0200
committer  NeilBrown <neilb@suse.de>  2011-07-28 03:32:10 +0200
commit     2ca68f5ed7383733102ee53cd8fa4021ecc3b275 (patch)
tree       29543cff802770d90380b889cf8b0cc01e7d1814
parent     md/raid1: clear bad-block record when write succeeds. (diff)
md/raid1: store behind-write pages in bi_vecs.
When performing write-behind we allocate pages to store the data during the write. Previously we just kept a flat list of pages; now we keep a list of struct bio_vec, which also records each segment's offset and size. This means the r1bio has complete information to create a new bio, which will be needed for retrying after write errors.

Signed-off-by: NeilBrown <neilb@suse.de>
Reviewed-by: Namhyung Kim <namhyung@gmail.com>
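For illustration only, here is a minimal sketch (not part of this patch) of what the saved bio_vecs make possible: rebuilding a write bio from the r1bio alone, using the stock block-layer helpers of that era, bio_alloc() and bio_add_page(). The helper name sketch_rebuild_behind_bio() is hypothetical and assumes it would sit alongside the code in drivers/md/raid1.c.

/* Hypothetical sketch, not in this patch: rebuild a bio purely from
 * r1_bio->behind_bvecs.  With the old struct page ** array this was
 * not possible, because each segment's offset and length were lost.
 */
static struct bio *sketch_rebuild_behind_bio(r1bio_t *r1_bio)
{
	struct bio *bio = bio_alloc(GFP_NOIO, r1_bio->behind_page_count);
	int i;

	if (!bio)
		return NULL;
	bio->bi_sector = r1_bio->sector;	/* start of the affected range */

	for (i = 0; i < r1_bio->behind_page_count; i++) {
		struct bio_vec *bv = &r1_bio->behind_bvecs[i];

		/* the saved offset and length are exactly what the old
		 * page list could not record */
		if (!bio_add_page(bio, bv->bv_page, bv->bv_len, bv->bv_offset))
			break;
	}
	return bio;
}

The retry logic actually added later in this series differs in detail; the sketch only illustrates that offset and length now travel with each page.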
Diffstat
-rw-r--r--  drivers/md/raid1.c  33
-rw-r--r--  drivers/md/raid1.h   2
2 files changed, 18 insertions(+), 17 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 141de75a6c7c..b16d2ee5e9dd 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -327,9 +327,9 @@ static void r1_bio_write_done(r1bio_t *r1_bio)
 			/* free extra copy of the data pages */
 			int i = r1_bio->behind_page_count;
 			while (i--)
-				safe_put_page(r1_bio->behind_pages[i]);
-			kfree(r1_bio->behind_pages);
-			r1_bio->behind_pages = NULL;
+				safe_put_page(r1_bio->behind_bvecs[i].bv_page);
+			kfree(r1_bio->behind_bvecs);
+			r1_bio->behind_bvecs = NULL;
 		}
 		/* clear the bitmap if all writes complete successfully */
 		bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
@@ -748,30 +748,31 @@ static void alloc_behind_pages(struct bio *bio, r1bio_t *r1_bio)
 {
 	int i;
 	struct bio_vec *bvec;
-	struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page*),
+	struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
 					GFP_NOIO);
-	if (unlikely(!pages))
+	if (unlikely(!bvecs))
 		return;
 
 	bio_for_each_segment(bvec, bio, i) {
-		pages[i] = alloc_page(GFP_NOIO);
-		if (unlikely(!pages[i]))
+		bvecs[i] = *bvec;
+		bvecs[i].bv_page = alloc_page(GFP_NOIO);
+		if (unlikely(!bvecs[i].bv_page))
 			goto do_sync_io;
-		memcpy(kmap(pages[i]) + bvec->bv_offset,
-		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
-		kunmap(pages[i]);
+		memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
+		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
+		kunmap(bvecs[i].bv_page);
 		kunmap(bvec->bv_page);
 	}
-	r1_bio->behind_pages = pages;
+	r1_bio->behind_bvecs = bvecs;
 	r1_bio->behind_page_count = bio->bi_vcnt;
 	set_bit(R1BIO_BehindIO, &r1_bio->state);
 	return;
 
 do_sync_io:
 	for (i = 0; i < bio->bi_vcnt; i++)
-		if (pages[i])
-			put_page(pages[i]);
-	kfree(pages);
+		if (bvecs[i].bv_page)
+			put_page(bvecs[i].bv_page);
+	kfree(bvecs);
 	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
 }
@@ -1058,7 +1059,7 @@ read_again:
 						   &r1_bio->state));
 			first_clone = 0;
 		}
-		if (r1_bio->behind_pages) {
+		if (r1_bio->behind_bvecs) {
 			struct bio_vec *bvec;
 			int j;
 
@@ -1070,7 +1071,7 @@ read_again:
 			 * them all
 			 */
 			__bio_for_each_segment(bvec, mbio, j, 0)
-				bvec->bv_page = r1_bio->behind_pages[j];
+				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
 			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
 				atomic_inc(&r1_bio->behind_remaining);
 		}
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index f81360d49af4..b7885474e96c 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -101,7 +101,7 @@ struct r1bio_s {
 	struct list_head	retry_list;
 	/* Next two are only valid when R1BIO_BehindIO is set */
-	struct page		**behind_pages;
+	struct bio_vec		*behind_bvecs;
 	int			behind_page_count;
 	/*
 	 * if the IO is in WRITE direction, then multiple bios are used.