author		Linus Torvalds <torvalds@linux-foundation.org>	2012-08-02 20:34:40 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-08-02 20:34:40 +0200
commit		25aa6a7ae46c6a041c46a2d314b9ab7c4f2baa41 (patch)
tree		b99c627c269e38450d5d0f9713862d2ed06d6e5e /drivers/md/raid5.c
parent		Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sag... (diff)
parent		md/dm-raid: DM_RAID should select MD_RAID10 (diff)
Merge tag 'md-3.6' of git://neil.brown.name/md
Pull additional md update from NeilBrown:
 "This contains a few patches that depend on plugging changes in the
  block layer so needed to wait for those.

  It also contains a Kconfig fix for the new RAID10 support in dm-raid."

* tag 'md-3.6' of git://neil.brown.name/md:
  md/dm-raid: DM_RAID should select MD_RAID10
  md/raid1: submit IO from originating thread instead of md thread.
  raid5: raid5d handle stripe in batch way
  raid5: make_request use batch stripe release
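The block-layer plugging change these patches depend on is the blk_check_plugged() helper merged for 3.6: while a task holds a blk_plug, a stacked driver can attach one callback per (function, data) pair and defer work until the plug is flushed. The following is only a minimal sketch of that pattern, not code from this commit; my_plug_cb, my_unplug, my_submit and my_dispatch are hypothetical names, and it assumes an in-tree kernel build rather than anything compilable on its own.

#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/slab.h>

struct my_plug_cb {
	struct blk_plug_cb cb;		/* embedded block-layer callback */
	struct list_head deferred;	/* items queued while plugged */
};

static void my_dispatch(struct list_head *item)
{
	/* hypothetical per-item work, done once at unplug time */
}

/* Run by the block layer when the task's plug is flushed; the
 * callback owns its cb and frees it, as raid5_unplug() does below. */
static void my_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
{
	struct my_plug_cb *cb = container_of(blk_cb, struct my_plug_cb, cb);

	while (!list_empty(&cb->deferred)) {
		struct list_head *item = cb->deferred.next;

		list_del_init(item);
		my_dispatch(item);
	}
	kfree(cb);
}

static void my_submit(void *dev, struct list_head *item)
{
	struct blk_plug_cb *blk_cb;
	struct my_plug_cb *cb;

	/* Returns the existing (my_unplug, dev) callback on the current
	 * task's plug, allocates a zeroed one of the given size, or
	 * returns NULL when the task is not plugged at all. */
	blk_cb = blk_check_plugged(my_unplug, dev, sizeof(*cb));
	if (!blk_cb) {
		my_dispatch(item);	/* no plug: no batching possible */
		return;
	}
	cb = container_of(blk_cb, struct my_plug_cb, cb);
	if (cb->deferred.next == NULL)	/* freshly allocated (zeroed) */
		INIT_LIST_HEAD(&cb->deferred);
	list_add_tail(item, &cb->deferred);
}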
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c	107
1 file changed, 91 insertions(+), 16 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 87a2d0bdedd1..adda94df5eb2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -484,7 +484,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
 		} else {
 			if (atomic_read(&sh->count)) {
 				BUG_ON(!list_empty(&sh->lru)
-				    && !test_bit(STRIPE_EXPANDING, &sh->state));
+				    && !test_bit(STRIPE_EXPANDING, &sh->state)
+				    && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
 			} else {
 				if (!test_bit(STRIPE_HANDLE, &sh->state))
 					atomic_inc(&conf->active_stripes);
@@ -4010,6 +4011,62 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
 	return sh;
 }
 
+struct raid5_plug_cb {
+	struct blk_plug_cb	cb;
+	struct list_head	list;
+};
+
+static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
+{
+	struct raid5_plug_cb *cb = container_of(
+		blk_cb, struct raid5_plug_cb, cb);
+	struct stripe_head *sh;
+	struct mddev *mddev = cb->cb.data;
+	struct r5conf *conf = mddev->private;
+
+	if (cb->list.next && !list_empty(&cb->list)) {
+		spin_lock_irq(&conf->device_lock);
+		while (!list_empty(&cb->list)) {
+			sh = list_first_entry(&cb->list, struct stripe_head, lru);
+			list_del_init(&sh->lru);
+			/*
+			 * avoid race release_stripe_plug() sees
+			 * STRIPE_ON_UNPLUG_LIST clear but the stripe
+			 * is still in our list
+			 */
+			smp_mb__before_clear_bit();
+			clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
+			__release_stripe(conf, sh);
+		}
+		spin_unlock_irq(&conf->device_lock);
+	}
+	kfree(cb);
+}
+
+static void release_stripe_plug(struct mddev *mddev,
+				struct stripe_head *sh)
+{
+	struct blk_plug_cb *blk_cb = blk_check_plugged(
+		raid5_unplug, mddev,
+		sizeof(struct raid5_plug_cb));
+	struct raid5_plug_cb *cb;
+
+	if (!blk_cb) {
+		release_stripe(sh);
+		return;
+	}
+
+	cb = container_of(blk_cb, struct raid5_plug_cb, cb);
+
+	if (cb->list.next == NULL)
+		INIT_LIST_HEAD(&cb->list);
+
+	if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
+		list_add_tail(&sh->lru, &cb->list);
+	else
+		release_stripe(sh);
+}
+
 static void make_request(struct mddev *mddev, struct bio * bi)
 {
 	struct r5conf *conf = mddev->private;
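release_stripe_plug()/raid5_unplug() above amortize conf->device_lock: each stripe goes onto the per-task plug list at most once (the STRIPE_ON_UNPLUG_LIST bit gates it), and the whole list is then released under a single lock acquisition, with smp_mb__before_clear_bit() ordering the list_del_init() before the flag clear so release_stripe_plug() can trust the bit. Below is a runnable userspace analogue of that flag-gated batch release, my own construction for illustration only (build with cc -std=c11 -pthread); none of these names come from the kernel.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ON_LIST 1	/* plays the role of STRIPE_ON_UNPLUG_LIST */

struct item {
	struct item *next;
	atomic_int flags;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;	/* "device_lock" */
static int released;

static void release_item(struct item *it)	/* the direct, locked path */
{
	(void)it;
	pthread_mutex_lock(&lock);
	released++;
	pthread_mutex_unlock(&lock);
}

/* Queue on the caller's private batch unless it is already queued. */
static void release_item_plug(struct item **batch, struct item *it)
{
	if (atomic_fetch_or(&it->flags, ON_LIST) & ON_LIST) {
		release_item(it);	/* already on someone's list */
		return;
	}
	it->next = *batch;
	*batch = it;
}

/* Drain the batch: one lock round-trip covers the whole list. */
static void unplug(struct item **batch)
{
	pthread_mutex_lock(&lock);
	while (*batch) {
		struct item *it = *batch;

		*batch = it->next;
		it->next = NULL;
		/* unlink first, then clear the flag; cf. the
		 * smp_mb__before_clear_bit() in raid5_unplug() */
		atomic_fetch_and(&it->flags, ~ON_LIST);
		released++;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct item items[4] = { { 0 } };
	struct item *batch = NULL;

	for (int i = 0; i < 4; i++)
		release_item_plug(&batch, &items[i]);
	release_item_plug(&batch, &items[0]);	/* duplicate: direct path */
	unplug(&batch);
	printf("released %d times\n", released);	/* prints 5 */
	return 0;
}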
@@ -4138,8 +4195,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 			if ((bi->bi_rw & REQ_NOIDLE) &&
 			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 				atomic_inc(&conf->preread_active_stripes);
-			mddev_check_plugged(mddev);
-			release_stripe(sh);
+			release_stripe_plug(mddev, sh);
 		} else {
 			/* cannot get stripe for read-ahead, just give-up */
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -4537,6 +4593,30 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 	return handled;
 }
 
+#define MAX_STRIPE_BATCH 8
+static int handle_active_stripes(struct r5conf *conf)
+{
+	struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
+	int i, batch_size = 0;
+
+	while (batch_size < MAX_STRIPE_BATCH &&
+			(sh = __get_priority_stripe(conf)) != NULL)
+		batch[batch_size++] = sh;
+
+	if (batch_size == 0)
+		return batch_size;
+	spin_unlock_irq(&conf->device_lock);
+
+	for (i = 0; i < batch_size; i++)
+		handle_stripe(batch[i]);
+
+	cond_resched();
+
+	spin_lock_irq(&conf->device_lock);
+	for (i = 0; i < batch_size; i++)
+		__release_stripe(conf, batch[i]);
+	return batch_size;
+}
+
 /*
  * This is our raid5 kernel thread.
@@ -4547,7 +4627,6 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
  */
 static void raid5d(struct mddev *mddev)
 {
-	struct stripe_head *sh;
 	struct r5conf *conf = mddev->private;
 	int handled;
 	struct blk_plug plug;
@@ -4561,6 +4640,7 @@ static void raid5d(struct mddev *mddev)
 	spin_lock_irq(&conf->device_lock);
 	while (1) {
 		struct bio *bio;
+		int batch_size;
 
 		if (
 		    !list_empty(&conf->bitmap_list)) {
@@ -4584,21 +4664,16 @@ static void raid5d(struct mddev *mddev)
 			handled++;
 		}
 
-		sh = __get_priority_stripe(conf);
-
-		if (!sh)
+		batch_size = handle_active_stripes(conf);
+		if (!batch_size)
 			break;
-		spin_unlock_irq(&conf->device_lock);
-
-		handled++;
-		handle_stripe(sh);
-		release_stripe(sh);
-		cond_resched();
+		handled += batch_size;
 
-		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+		if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
+			spin_unlock_irq(&conf->device_lock);
 			md_check_recovery(mddev);
-
-		spin_lock_irq(&conf->device_lock);
+			spin_lock_irq(&conf->device_lock);
+		}
 	}
 	pr_debug("%d stripes handled\n", handled);
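handle_active_stripes() and the raid5d() loop above follow a common amortization shape: pull up to MAX_STRIPE_BATCH items off the queue while holding device_lock, drop the lock once for the expensive handle_stripe() work, then retake it once to release the whole batch. A runnable userspace sketch of that shape, my analogue rather than the kernel code (all names and the pre-filled queue are invented):

#include <pthread.h>
#include <stdio.h>

#define MAX_BATCH 8
#define NITEMS 20

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int queue[NITEMS];
static int head, tail = NITEMS;	/* a pre-filled queue of NITEMS items */

static int dequeue(void)	/* caller holds lock; -1 when empty */
{
	return head < tail ? queue[head++] : -1;
}

static void handle(int item)	/* the expensive work, done unlocked */
{
	printf("handling %d\n", item);
}

/* Mirrors handle_active_stripes(): returns the batch size handled. */
static int handle_batch(void)
{
	int batch[MAX_BATCH], n = 0, item;

	while (n < MAX_BATCH && (item = dequeue()) >= 0)
		batch[n++] = item;
	if (n == 0)
		return 0;
	pthread_mutex_unlock(&lock);	/* one unlock per batch, not per item */

	for (int i = 0; i < n; i++)
		handle(batch[i]);

	pthread_mutex_lock(&lock);
	return n;
}

int main(void)
{
	int handled = 0, n;

	for (int i = 0; i < NITEMS; i++)
		queue[i] = i;

	pthread_mutex_lock(&lock);	/* as raid5d() holds device_lock */
	while ((n = handle_batch()) > 0)
		handled += n;
	pthread_mutex_unlock(&lock);
	printf("%d items handled\n", handled);
	return 0;
}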