author	shli@kernel.org <shli@kernel.org>	2014-12-15 02:57:03 +0100
committer	NeilBrown <neilb@suse.de>	2015-04-22 00:00:41 +0200
commit	da41ba65972532a04f73927c903029a7ec3bc2ed
tree	f259e39b2d333729e0e1cdafbe952f21aaaf0d3f /drivers/md/raid5.c
parent	raid5: use flex_array for scribble data
raid5: add a new flag to track if a stripe can be batched
A brand-new stripe with a write request can be batched. Any time the stripe is handled or a new read is queued, the flag is cleared.

Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: NeilBrown <neilb@suse.de>
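The lifecycle of the new flag is small and worth seeing end to end: it is set exactly once, when a stripe is freshly initialised, and cleared by any event that makes batching unsafe (a queued read, a bio against the previous geometry during reshape, or the stripe being handled). Below is a minimal userspace C sketch of that lifecycle; the set_bit/clear_bit/test_bit helpers and the reduced stripe_head are simplified stand-ins for the kernel's atomic bitops and real structure, and the bit position is illustrative only.

/* Minimal userspace sketch of the STRIPE_BATCH_READY lifecycle added by
 * this patch.  Not kernel code: the bitops below are plain, non-atomic
 * stand-ins, and stripe_head is reduced to the one field the sketch needs. */
#include <stdio.h>
#include <stdbool.h>

#define STRIPE_BATCH_READY 14	/* bit position is illustrative only */

struct stripe_head {
	unsigned long state;
};

static void set_bit(int nr, unsigned long *addr)   { *addr |=  (1UL << nr); }
static void clear_bit(int nr, unsigned long *addr) { *addr &= ~(1UL << nr); }
static bool test_bit(int nr, unsigned long *addr)  { return *addr & (1UL << nr); }

/* init_stripe(): a freshly initialised stripe starts out batchable. */
static void init_stripe(struct stripe_head *sh)
{
	sh->state = 0;
	set_bit(STRIPE_BATCH_READY, &sh->state);
}

/* add_stripe_bio() (reduced to the new logic): a read, or a bio against
 * the "previous" geometry while a reshape is in progress, makes the
 * stripe unsuitable for batching. */
static void add_stripe_bio(struct stripe_head *sh, int forwrite, int previous)
{
	if (!forwrite || previous)
		clear_bit(STRIPE_BATCH_READY, &sh->state);
}

/* handle_stripe(): once the stripe has been handled it can no longer
 * be batched. */
static void handle_stripe(struct stripe_head *sh)
{
	clear_bit(STRIPE_BATCH_READY, &sh->state);
}

int main(void)
{
	struct stripe_head sh;

	init_stripe(&sh);
	printf("fresh:        batchable=%d\n", test_bit(STRIPE_BATCH_READY, &sh.state));

	add_stripe_bio(&sh, /*forwrite=*/1, /*previous=*/0);
	printf("after write:  batchable=%d\n", test_bit(STRIPE_BATCH_READY, &sh.state));

	add_stripe_bio(&sh, /*forwrite=*/0, /*previous=*/0);
	printf("after read:   batchable=%d\n", test_bit(STRIPE_BATCH_READY, &sh.state));

	init_stripe(&sh);
	handle_stripe(&sh);
	printf("after handle: batchable=%d\n", test_bit(STRIPE_BATCH_READY, &sh.state));
	return 0;
}

Compiled and run, the sketch prints batchable=1 only for the fresh write-only stripe; the read and the handle_stripe() call each knock the flag down, mirroring the hunks below.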
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c	12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7fb510e54548..49b0f23dbad2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -555,6 +555,7 @@ retry:
 		goto retry;
 	insert_hash(conf, sh);
 	sh->cpu = smp_processor_id();
+	set_bit(STRIPE_BATCH_READY, &sh->state);
 }
 
 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
@@ -2645,7 +2646,8 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
  * toread/towrite point to the first in a chain.
  * The bi_next chain must be in order.
  */
-static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
+static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
+			  int forwrite, int previous)
 {
 	struct bio **bip;
 	struct r5conf *conf = sh->raid_conf;
@@ -2678,6 +2680,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
 		goto overlap;
 
+	if (!forwrite || previous)
+		clear_bit(STRIPE_BATCH_READY, &sh->state);
+
 	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
 	if (*bip)
 		bi->bi_next = *bip;
@@ -3824,6 +3829,7 @@ static void handle_stripe(struct stripe_head *sh)
 		return;
 	}
 
+	clear_bit(STRIPE_BATCH_READY, &sh->state);
 	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
 		spin_lock(&sh->stripe_lock);
 		/* Cannot process 'sync' concurrently with 'discard' */
@@ -4793,7 +4799,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 		}
 
 		if (test_bit(STRIPE_EXPANDING, &sh->state) ||
-		    !add_stripe_bio(sh, bi, dd_idx, rw)) {
+		    !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
 			/* Stripe is busy expanding or
 			 * add failed due to overlap. Flush everything
 			 * and wait a while
@@ -5206,7 +5212,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 			return handled;
 		}
 
-		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
+		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
			release_stripe(sh);
			raid5_set_bi_processed_stripes(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
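Note that this patch only tracks the flag; nothing in the diff consumes it yet. The natural consumer, in a follow-up change, is a test_bit() check gating whether a stripe may join a batch, along the lines of the hypothetical helper below, reusing the types from the sketch above (the name stripe_can_batch and its placement are assumptions, not part of this commit):

/* Hypothetical consumer -- not part of this patch.  A stripe may only be
 * linked into a batch while STRIPE_BATCH_READY is still set, i.e. while
 * it is fresh, write-only, and not yet handled. */
static bool stripe_can_batch(struct stripe_head *sh)
{
	return test_bit(STRIPE_BATCH_READY, &sh->state);
}

In the real driver the set and clear sites above all sit inside raid5's existing stripe serialisation (add_stripe_bio runs under sh->stripe_lock, and handle_stripe only proceeds after winning STRIPE_ACTIVE), so such a check sees a consistent answer.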