author		Vasily Averin <vvs@sw.ru>		2007-10-31 08:33:24 +0100
committer	Jens Axboe <axboe@carl.home.kernel.dk>	2007-11-02 08:47:25 +0100
commit		5ec140e600b7d6624c657f008833f0e71bd5ef48 (patch)
tree		44691c88d767418a57797e84253a4633825c0c98 /drivers/md
parent		Deadline iosched: Fix batching fairness (diff)
dm: bounce_pfn limit added
Device mapper uses its own bounce_pfn that may differ from the one on the
underlying device. Because of that, dm can build incorrect requests that
contain sg elements larger than the underlying device is able to handle.
This was the cause of slab corruption in the i2o layer, observed on the
i386 arch when very long direct-IO requests were addressed to a
dm-over-i2o device.

Signed-off-by: Vasily Averin <vvs@sw.ru>
Cc: <stable@kernel.org>
Cc: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
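The failure mode is easiest to see with numbers: the block layer bounces a
page only when its page frame number lies above the queue's bounce_pfn, so
if dm advertises a laxer limit than the device underneath, highmem pages
the device cannot address slip through unbounced. A minimal userspace
sketch of the mismatch, with all pfn values invented for illustration:

#include <stdio.h>

int main(void)
{
	/* Invented limits: the underlying i2o-like device can only
	 * address pages below pfn 0x100000; dm's queue (hypothetically)
	 * advertises no bounce limit at all. */
	unsigned long i2o_bounce_pfn = 0xfffff;
	unsigned long dm_bounce_pfn = -1UL;
	unsigned long page_pfn = 0x140000;	/* a page in a long direct-IO */

	/* The block layer bounces only pages above the queue's limit,
	 * and it consults dm's queue, not the device's. */
	printf("dm would bounce:   %s\n", page_pfn > dm_bounce_pfn ? "yes" : "no");
	printf("i2o needs bounce:  %s\n", page_pfn > i2o_bounce_pfn ? "yes" : "no");
	return 0;
}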
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-table.c	7
1 file changed, 7 insertions, 0 deletions
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8939e6105088..5a7eb650181e 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -102,6 +102,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
 	lhs->seg_boundary_mask =
 		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
 
+	lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);
+
 	lhs->no_cluster |= rhs->no_cluster;
 }
 
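The helper is not new to this patch: min_not_zero() comes from
include/linux/kernel.h, defined in kernels of this era essentially as
below, so an operand of zero (meaning "limit not set") never clamps the
result:

#define min_not_zero(x, y) ({				\
	typeof(x) __x = (x);				\
	typeof(y) __y = (y);				\
	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })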
@@ -566,6 +568,8 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 		min_not_zero(rs->seg_boundary_mask,
 			     q->seg_boundary_mask);
 
+	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+
 	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }
 EXPORT_SYMBOL_GPL(dm_set_device_limits);
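The q->bounce_pfn consulted above is whatever the underlying driver
advertised through blk_queue_bounce_limit(). A sketch of how a low-level
driver might constrain its queue (the mydrv_* name is invented; the call
itself is the stock block-layer API of this era):

#include <linux/blkdev.h>

static void mydrv_set_dma_limit(struct request_queue *q)
{
	/* Ask the block layer to bounce any page above the highmem
	 * boundary into low memory before DMA; internally this sets
	 * the q->bounce_pfn that dm_set_device_limits() reads. */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}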
@@ -707,6 +711,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 		rs->max_segment_size = MAX_SEGMENT_SIZE;
 	if (!rs->seg_boundary_mask)
 		rs->seg_boundary_mask = -1;
+	if (!rs->bounce_pfn)
+		rs->bounce_pfn = -1;
 }
 
 int dm_table_add_target(struct dm_table *t, const char *type,
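Assigning -1 here is the usual "no restriction" sentinel: bounce_pfn is an
unsigned long, so -1 wraps to ULONG_MAX and no page frame number can exceed
it. A standalone check of the idiom:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long bounce_pfn = -1;	/* same idiom as the patch */

	/* -1 wraps to ULONG_MAX: every pfn compares at or below it,
	 * so nothing is ever bounced under this "limit". */
	printf("%d\n", bounce_pfn == ULONG_MAX);	/* prints 1 */
	return 0;
}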
@@ -891,6 +897,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	q->hardsect_size = t->limits.hardsect_size;
 	q->max_segment_size = t->limits.max_segment_size;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
+	q->bounce_pfn = t->limits.bounce_pfn;
 	if (t->limits.no_cluster)
 		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
 	else
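Taken together, the four hunks form one pipeline: fold each underlying
device's bounce_pfn into the table's restrictions with min_not_zero(),
default an unset result to unrestricted, then publish the outcome on dm's
own queue. A compact userspace walk-through of that flow (device limits
invented):

#include <stdio.h>

/* Userspace stand-in for the kernel helper: zero means "unset". */
static unsigned long min_not_zero(unsigned long x, unsigned long y)
{
	return x == 0 ? y : (y == 0 ? x : (x < y ? x : y));
}

int main(void)
{
	unsigned long devs[] = { 0, 0xfffff };	/* per-device bounce_pfn */
	unsigned long combined = 0;

	for (int i = 0; i < 2; i++)	/* combine_restrictions_low() */
		combined = min_not_zero(combined, devs[i]);

	if (!combined)			/* check_for_valid_limits() */
		combined = -1UL;

	/* dm_table_set_restrictions(): dm's queue now carries the
	 * tightest limit in the stack. */
	printf("q->bounce_pfn = %#lx\n", combined);
	return 0;
}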