Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c	147
1 file changed, 80 insertions(+), 67 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 31a0cbf63384..7b820b81d8c2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -36,6 +36,7 @@
*/
#include <linux/blkdev.h>
+#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
@@ -789,87 +790,80 @@ struct stripe_request_ctx {
*/
static bool is_inactive_blocked(struct r5conf *conf, int hash)
{
- int active = atomic_read(&conf->active_stripes);
-
if (list_empty(conf->inactive_list + hash))
return false;
if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
return true;
- return active < (conf->max_nr_stripes * 3 / 4);
+ return (atomic_read(&conf->active_stripes) <
+ (conf->max_nr_stripes * 3 / 4));
}
-static struct stripe_head *__raid5_get_active_stripe(struct r5conf *conf,
+struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
struct stripe_request_ctx *ctx, sector_t sector,
- bool previous, bool noblock, bool noquiesce)
+ unsigned int flags)
{
struct stripe_head *sh;
int hash = stripe_hash_locks_hash(conf, sector);
+ int previous = !!(flags & R5_GAS_PREVIOUS);
pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
spin_lock_irq(conf->hash_locks + hash);
-retry:
- if (!noquiesce && conf->quiesce) {
- /*
- * Must release the reference to batch_last before waiting,
- * on quiesce, otherwise the batch_last will hold a reference
- * to a stripe and raid5_quiesce() will deadlock waiting for
- * active_stripes to go to zero.
- */
- if (ctx && ctx->batch_last) {
- raid5_release_stripe(ctx->batch_last);
- ctx->batch_last = NULL;
- }
-
- wait_event_lock_irq(conf->wait_for_quiescent, !conf->quiesce,
- *(conf->hash_locks + hash));
- }
+ for (;;) {
+ if (!(flags & R5_GAS_NOQUIESCE) && conf->quiesce) {
+ /*
+			 * Must release the reference to batch_last before
+			 * waiting on quiesce; otherwise batch_last will
+			 * hold a reference to a stripe and raid5_quiesce()
+			 * will deadlock waiting for active_stripes to
+			 * reach zero.
+ */
+ if (ctx && ctx->batch_last) {
+ raid5_release_stripe(ctx->batch_last);
+ ctx->batch_last = NULL;
+ }
- sh = find_get_stripe(conf, sector, conf->generation - previous, hash);
- if (sh)
- goto out;
+ wait_event_lock_irq(conf->wait_for_quiescent,
+ !conf->quiesce,
+ *(conf->hash_locks + hash));
+ }
- if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
- goto wait_for_stripe;
+ sh = find_get_stripe(conf, sector, conf->generation - previous,
+ hash);
+ if (sh)
+ break;
- sh = get_free_stripe(conf, hash);
- if (sh) {
- r5c_check_stripe_cache_usage(conf);
- init_stripe(sh, sector, previous);
- atomic_inc(&sh->count);
- goto out;
- }
+ if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
+ sh = get_free_stripe(conf, hash);
+ if (sh) {
+ r5c_check_stripe_cache_usage(conf);
+ init_stripe(sh, sector, previous);
+ atomic_inc(&sh->count);
+ break;
+ }
- if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
- set_bit(R5_ALLOC_MORE, &conf->cache_state);
+ if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
+ set_bit(R5_ALLOC_MORE, &conf->cache_state);
+ }
-wait_for_stripe:
- if (noblock)
- goto out;
+ if (flags & R5_GAS_NOBLOCK)
+ break;
- set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
- r5l_wake_reclaim(conf->log, 0);
- wait_event_lock_irq(conf->wait_for_stripe,
- is_inactive_blocked(conf, hash),
- *(conf->hash_locks + hash));
- clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
- goto retry;
+ set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ r5l_wake_reclaim(conf->log, 0);
+ wait_event_lock_irq(conf->wait_for_stripe,
+ is_inactive_blocked(conf, hash),
+ *(conf->hash_locks + hash));
+ clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ }
-out:
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
-struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
- sector_t sector, bool previous, bool noblock, bool noquiesce)
-{
- return __raid5_get_active_stripe(conf, NULL, sector, previous, noblock,
- noquiesce);
-}
-
static bool is_full_stripe_write(struct stripe_head *sh)
{
BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
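
The R5_GAS_* flags consumed above are declared in raid5.h, which this
diff does not touch. The sketch below shows the flag bits and the old
to new call mapping as assumed by these call sites; treat the exact
values as an assumption, not part of this patch:

	/* Assumed flag bits; the authoritative definitions live in raid5.h. */
	enum {
		R5_GAS_PREVIOUS		= 1 << 0, /* use the pre-reshape geometry */
		R5_GAS_NOBLOCK		= 1 << 1, /* return NULL instead of waiting */
		R5_GAS_NOQUIESCE	= 1 << 2, /* proceed even while conf->quiesce is set */
	};

	/* Old call:  raid5_get_active_stripe(conf, sector, 1, 1, 1);
	 * New call:  raid5_get_active_stripe(conf, NULL, sector,
	 *		R5_GAS_PREVIOUS | R5_GAS_NOBLOCK | R5_GAS_NOQUIESCE);
	 */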
@@ -4047,7 +4041,7 @@ static void handle_stripe_fill(struct stripe_head *sh,
* back cache (prexor with orig_page, and then xor with
* page) in the read path
*/
- if (s->injournal && s->failed) {
+ if (s->to_read && s->injournal && s->failed) {
if (test_bit(STRIPE_R5C_CACHING, &sh->state))
r5c_make_stripe_write_out(sh);
goto out;
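
The added s->to_read check restricts the forced write-out to stripes
with a pending read. A minimal sketch of the corrected guard, using a
made-up struct that mirrors only the fields involved (illustrative,
not the kernel's stripe_head_state):

	struct sketch_state { int to_read, injournal, failed; };

	static int must_force_write_out(const struct sketch_state *s)
	{
		/* A read cannot be served while data sits in the
		 * write-back journal on a degraded stripe, so force
		 * write-out first; non-read IO proceeds to stripe fill. */
		return s->to_read && s->injournal && s->failed;
	}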
@@ -4636,7 +4630,8 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
sector_t bn = raid5_compute_blocknr(sh, i, 1);
sector_t s = raid5_compute_sector(conf, bn, 0,
&dd_idx, NULL);
- sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1);
+ sh2 = raid5_get_active_stripe(conf, NULL, s,
+ R5_GAS_NOBLOCK | R5_GAS_NOQUIESCE);
if (sh2 == NULL)
/* so far only the early blocks of this stripe
* have been requested. When later blocks
@@ -5273,7 +5268,9 @@ static void handle_stripe(struct stripe_head *sh)
/* Finish reconstruct operations initiated by the expansion process */
if (sh->reconstruct_state == reconstruct_state_result) {
struct stripe_head *sh_src
- = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1);
+ = raid5_get_active_stripe(conf, NULL, sh->sector,
+ R5_GAS_PREVIOUS | R5_GAS_NOBLOCK |
+ R5_GAS_NOQUIESCE);
if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
/* sh cannot be written until sh_src has been read.
* so arrange for sh to be delayed a little
@@ -5542,7 +5539,6 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
if (is_badblock(rdev, sector, bio_sectors(raid_bio), &first_bad,
&bad_sectors)) {
- bio_put(raid_bio);
rdev_dec_pending(rdev, mddev);
return 0;
}
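
Dropping the bio_put() matches the function's return contract: a 0
return tells the caller to fall back to the stripe-cache path with
raid_bio still live, so this error path must not drop a reference the
caller goes on to use. A simplified sketch of that caller contract
(submit_via_stripe_cache is a hypothetical stand-in):

	/* Caller's view, simplified: 0 means "not handled, bio still ours". */
	if (!raid5_read_one_chunk(mddev, raid_bio))
		submit_via_stripe_cache(mddev, raid_bio); /* hypothetical helper */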
@@ -5823,7 +5819,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
DEFINE_WAIT(w);
int d;
again:
- sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0);
+ sh = raid5_get_active_stripe(conf, NULL, logical_sector, 0);
prepare_to_wait(&conf->wait_for_overlap, &w,
TASK_UNINTERRUPTIBLE);
set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
@@ -5978,7 +5974,7 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
enum stripe_result ret;
struct stripe_head *sh;
sector_t new_sector;
- int previous = 0;
+ int previous = 0, flags = 0;
int seq, dd_idx;
seq = read_seqcount_begin(&conf->gen_lock);
@@ -6012,8 +6008,11 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
pr_debug("raid456: %s, sector %llu logical %llu\n", __func__,
new_sector, logical_sector);
- sh = __raid5_get_active_stripe(conf, ctx, new_sector, previous,
- (bi->bi_opf & REQ_RAHEAD), 0);
+ if (previous)
+ flags |= R5_GAS_PREVIOUS;
+ if (bi->bi_opf & REQ_RAHEAD)
+ flags |= R5_GAS_NOBLOCK;
+ sh = raid5_get_active_stripe(conf, ctx, new_sector, flags);
if (unlikely(!sh)) {
/* cannot get stripe, just give-up */
bi->bi_status = BLK_STS_IOERR;
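
Mapping REQ_RAHEAD to R5_GAS_NOBLOCK preserves the policy of the old
boolean argument: readahead is best-effort, so rather than wait for a
free stripe the bio fails fast with BLK_STS_IOERR, which is harmless
for readahead. Illustrative equivalence (old signature shown for
comparison only):

	/* Before:  __raid5_get_active_stripe(conf, ctx, new_sector, previous,
	 *				    (bi->bi_opf & REQ_RAHEAD), 0);
	 * After:   flags = (previous ? R5_GAS_PREVIOUS : 0) |
	 *		    ((bi->bi_opf & REQ_RAHEAD) ? R5_GAS_NOBLOCK : 0);
	 *	    raid5_get_active_stripe(conf, ctx, new_sector, flags);
	 */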
@@ -6362,7 +6361,8 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
for (i = 0; i < reshape_sectors; i += RAID5_STRIPE_SECTORS(conf)) {
int j;
int skipped_disk = 0;
- sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
+ sh = raid5_get_active_stripe(conf, NULL, stripe_addr+i,
+ R5_GAS_NOQUIESCE);
set_bit(STRIPE_EXPANDING, &sh->state);
atomic_inc(&conf->reshape_stripes);
/* If any of this stripe is beyond the end of the old
@@ -6411,7 +6411,8 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
if (last_sector >= mddev->dev_sectors)
last_sector = mddev->dev_sectors - 1;
while (first_sector <= last_sector) {
- sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1);
+ sh = raid5_get_active_stripe(conf, NULL, first_sector,
+ R5_GAS_PREVIOUS | R5_GAS_NOQUIESCE);
set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh);
@@ -6531,9 +6532,10 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
- sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);
+ sh = raid5_get_active_stripe(conf, NULL, sector_nr,
+ R5_GAS_NOBLOCK);
if (sh == NULL) {
- sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0);
+ sh = raid5_get_active_stripe(conf, NULL, sector_nr, 0);
/* make sure we don't swamp the stripe cache if someone else
* is trying to get access
*/
@@ -6596,8 +6598,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
/* already done this stripe */
continue;
- sh = raid5_get_active_stripe(conf, sector, 0, 1, 1);
-
+ sh = raid5_get_active_stripe(conf, NULL, sector,
+ R5_GAS_NOBLOCK | R5_GAS_NOQUIESCE);
if (!sh) {
/* failed to get a stripe - must wait */
conf->retry_read_aligned = raid_bio;
@@ -6781,7 +6783,18 @@ static void raid5d(struct md_thread *thread)
spin_unlock_irq(&conf->device_lock);
md_check_recovery(mddev);
spin_lock_irq(&conf->device_lock);
+
+ /*
+			 * Waiting on MD_SB_CHANGE_PENDING below may deadlock,
+			 * since md_check_recovery() is needed to clear
+			 * the flag when using mdmon.
+ */
+ continue;
}
+
+ wait_event_lock_irq(mddev->sb_wait,
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
+ conf->device_lock);
}
pr_debug("%d stripes handled\n", handled);