author    Mike Snitzer <snitzer@redhat.com>    2013-12-11 20:01:20 +0100
committer Mike Snitzer <snitzer@redhat.com>    2014-01-07 16:14:25 +0100
commit    daec338bbdaa96ba5b14c4777603e65ef74c769b (patch)
tree      a0798a5cddec99ad1bd7cda0b2b9a33164032e24
parent    dm thin: return error from alloc_data_block if pool is not in write mode (diff)
dm thin: add mappings to end of prepared_* lists
Mappings could be processed in descending logical block order,
particularly if buffered IO is used.  This could adversely affect the
latency of IO processing.

Fix this by adding mappings to the end of the 'prepared_mappings' and
'prepared_discards' lists.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Joe Thornber <ejt@redhat.com>
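The fix hinges on the insertion semantics of the kernel's list helpers:
list_add() inserts just after the list head, so repeated calls build a
LIFO stack and the worker drains mappings in reverse submission order,
while list_add_tail() inserts just before the head, preserving FIFO
order.  Below is a minimal userspace sketch of that distinction; the
list type and helpers are simplified re-implementations modeled on
<linux/list.h>, and struct mapping is an illustrative stand-in for the
driver's mapping structure, not the actual dm_thin_new_mapping.

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head->prev = head;
}

static void __list_add(struct list_head *entry,
		       struct list_head *prev, struct list_head *next)
{
	next->prev = entry;
	entry->next = next;
	entry->prev = prev;
	prev->next = entry;
}

/* Insert just after the head: repeated adds build a LIFO stack. */
static void list_add(struct list_head *entry, struct list_head *head)
{
	__list_add(entry, head, head->next);
}

/* Insert just before the head: repeated adds build a FIFO queue. */
static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	__list_add(entry, head->prev, head);
}

/* 'list' is the first member, so a node pointer is also a mapping pointer. */
struct mapping {
	struct list_head list;
	int block;	/* stand-in for a logical block number */
};

static void dump(const char *tag, struct list_head *head)
{
	struct list_head *pos;

	printf("%s:", tag);
	for (pos = head->next; pos != head; pos = pos->next)
		printf(" %d", ((struct mapping *)pos)->block);
	printf("\n");
}

int main(void)
{
	struct mapping a[3], b[3];
	struct list_head stack, queue;
	int i;

	INIT_LIST_HEAD(&stack);
	INIT_LIST_HEAD(&queue);

	for (i = 0; i < 3; i++) {
		a[i].block = b[i].block = i;
		list_add(&a[i].list, &stack);      /* old behaviour */
		list_add_tail(&b[i].list, &queue); /* patched behaviour */
	}

	dump("list_add      (LIFO)", &stack);	/* prints: 2 1 0 */
	dump("list_add_tail (FIFO)", &queue);	/* prints: 0 1 2 */
	return 0;
}

Compiled with any C compiler, this prints the blocks in descending
order for the head-insertion list and in submission order for the
tail-insertion list, which is exactly the ordering problem the patch
addresses.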
 drivers/md/dm-thin.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1988019df5c9..efa3d42ac70a 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -535,7 +535,7 @@ static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
 	struct pool *pool = m->tc->pool;
 
 	if (m->quiesced && m->prepared) {
-		list_add(&m->list, &pool->prepared_mappings);
+		list_add_tail(&m->list, &pool->prepared_mappings);
 		wake_worker(pool);
 	}
 }
@@ -1058,7 +1058,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
spin_lock_irqsave(&pool->lock, flags);
- list_add(&m->list, &pool->prepared_discards);
+ list_add_tail(&m->list, &pool->prepared_discards);
spin_unlock_irqrestore(&pool->lock, flags);
wake_worker(pool);
}
@@ -2919,7 +2919,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
 	if (!list_empty(&work)) {
 		spin_lock_irqsave(&pool->lock, flags);
 		list_for_each_entry_safe(m, tmp, &work, list)
-			list_add(&m->list, &pool->prepared_discards);
+			list_add_tail(&m->list, &pool->prepared_discards);
 		spin_unlock_irqrestore(&pool->lock, flags);
 		wake_worker(pool);
 	}