author     Artur Paszkiewicz <artur.paszkiewicz@intel.com>   2024-08-27 17:35:35 +0200
committer  Song Liu <song@kernel.org>                        2024-08-29 18:37:10 +0200
commit     0e4aac7366666e1377ce7669b7f63b94c1d616e6
tree       5c4c6d95e12ff6d70ab4a41d7eb1290eb57b5fee
parent     md/raid5: use wait_on_bit() for R5_Overlap
md/raid5: only add to wq if reshape is in progress
Now that actual overlaps are not handled on the wait_for_overlap wq
anymore, the remaining cases when we wait on this wq are limited to
reshape. If reshape is not in progress, don't add to the wq in
raid5_make_request() because add_wait_queue() / remove_wait_queue()
operations take a spinlock and cause noticeable contention when multiple
threads are submitting requests to the mddev.
Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Link: https://lore.kernel.org/r/20240827153536.6743-3-artur.paszkiewicz@intel.com
Signed-off-by: Song Liu <song@kernel.org>
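
The contention the commit message describes comes from the waitqueue's internal spinlock: add_wait_queue() and remove_wait_queue() both acquire wq_head->lock, so every submitting thread serializes on that lock even when nothing ever sleeps. Below is a minimal userspace analogue of the fix, for illustration only: the struct, flag, and function names are invented for this sketch and a pthread mutex stands in for the waitqueue spinlock.

	/*
	 * Userspace sketch of the pattern: only register as a waiter
	 * (which requires the shared lock) when the rare "reshaping"
	 * condition holds; the common fast path takes no shared lock.
	 */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	struct waitq {
		pthread_mutex_t lock;	/* analogue of the waitqueue spinlock */
		pthread_cond_t cond;
		int nr_waiters;
	};

	static struct waitq wq = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};

	/* analogue of conf->reshape_progress != MaxSector */
	static atomic_bool reshaping;

	static void submit_request(void)
	{
		bool on_wq = false;

		if (atomic_load(&reshaping)) {
			/* Slow path: register as a waiter under the shared lock. */
			pthread_mutex_lock(&wq.lock);
			wq.nr_waiters++;
			pthread_mutex_unlock(&wq.lock);
			on_wq = true;
		}
		/* Fast path: no shared lock taken, hence no contention here. */

		/* ... issue the request; may sleep on wq.cond only if on_wq ... */

		if (on_wq) {
			pthread_mutex_lock(&wq.lock);
			wq.nr_waiters--;
			pthread_mutex_unlock(&wq.lock);
		}
	}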
-rw-r--r--   drivers/md/raid5.c | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index de7d9959b3dc..e1ddfb6d8b37 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6070,6 +6070,7 @@ static sector_t raid5_bio_lowest_chunk_sector(struct r5conf *conf,
 static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 {
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+	bool on_wq;
 	struct r5conf *conf = mddev->private;
 	sector_t logical_sector;
 	struct stripe_request_ctx ctx = {};
@@ -6143,11 +6144,15 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 	 * sequential IO pattern. We don't bother with the optimization when
 	 * reshaping as the performance benefit is not worth the complexity.
 	 */
-	if (likely(conf->reshape_progress == MaxSector))
+	if (likely(conf->reshape_progress == MaxSector)) {
 		logical_sector = raid5_bio_lowest_chunk_sector(conf, bi);
+		on_wq = false;
+	} else {
+		add_wait_queue(&conf->wait_for_overlap, &wait);
+		on_wq = true;
+	}
 	s = (logical_sector - ctx.first_sector) >> RAID5_STRIPE_SHIFT(conf);
 
-	add_wait_queue(&conf->wait_for_overlap, &wait);
 	while (1) {
 		res = make_stripe_request(mddev, conf, &ctx, logical_sector,
 					  bi);
@@ -6158,6 +6163,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 			continue;
 
 		if (res == STRIPE_SCHEDULE_AND_RETRY) {
+			WARN_ON_ONCE(!on_wq);
 			/*
 			 * Must release the reference to batch_last before
 			 * scheduling and waiting for work to be done,
@@ -6182,7 +6188,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 		logical_sector = ctx.first_sector +
 			(s << RAID5_STRIPE_SHIFT(conf));
 	}
-	remove_wait_queue(&conf->wait_for_overlap, &wait);
+	if (unlikely(on_wq))
+		remove_wait_queue(&conf->wait_for_overlap, &wait);
 
 	if (ctx.batch_last)
 		raid5_release_stripe(ctx.batch_last);
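
The WARN_ON_ONCE(!on_wq) records the new invariant: with overlap handling moved off this waitqueue (see the parent commit), STRIPE_SCHEDULE_AND_RETRY is only expected while a reshape is in progress, i.e. only when the task registered itself on the queue. For reference, the DEFINE_WAIT_FUNC()/wait_woken() machinery used above follows the kernel's standard woken-wake pattern; in the simplified sketch below, wq_head and condition are placeholders, not identifiers from this patch.

	/*
	 * Standard woken-wake wait pattern (simplified sketch).
	 * Both registration calls acquire the waitqueue spinlock,
	 * which is exactly what the fast path now avoids.
	 */
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&wq_head, &wait);	/* takes wq_head->lock */
	while (!condition)
		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	remove_wait_queue(&wq_head, &wait);	/* takes wq_head->lock again */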