author     Lars Ellenberg <lars.ellenberg@linbit.com>      2010-05-14 19:16:41 +0200
committer  Philipp Reisner <philipp.reisner@linbit.com>    2010-05-18 02:03:05 +0200
commit     a1c88d0d7aa2ef427f78834c9a3b0a673a19dca6 (patch)
tree       be62930616401a30e3d4784a109c001323d4470a /drivers
parent     drbd: allow resync requests to be larger than max_segment_size (diff)
download   linux-a1c88d0d7aa2ef427f78834c9a3b0a673a19dca6.tar.xz
           linux-a1c88d0d7aa2ef427f78834c9a3b0a673a19dca6.zip
drbd: always use_bmbv, ignore setting
Now that the peer may handle multi-bio EEs, we can ignore the peer's limit,
and concentrate on the limits of the local IO stack.

This is safe across drbd protocol versions, as our queue_max_sectors() will
be adjusted accordingly.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
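For orientation, here is a minimal sketch of the size-selection logic this change leaves in receive_sizes(), condensed from the hunks below. It is not the literal kernel code: the helper name and its parameter types are assumptions, while the statements inside mirror the diff.

/* Simplified sketch, condensed from the diff below.  pick_max_seg_s()
 * is a made-up helper and its parameter types are assumed from context;
 * the identifiers it uses all appear in the hunks that follow. */
static void pick_max_seg_s(struct drbd_conf *mdev, struct p_sizes *p)
{
	unsigned int max_seg_s;

	if (mdev->agreed_pro_version < 94)
		/* old peer: it cannot split large EEs, honor its limit */
		max_seg_s = be32_to_cpu(p->max_segment_size);
	else	/* drbd 8.3.8 onwards: only local IO stack limits matter */
		max_seg_s = DRBD_MAX_SEGMENT_SIZE;

	/* reprogram our queue only when the value actually changed */
	if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
		drbd_setup_queue_param(mdev, max_seg_s);
}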
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/drbd/drbd_nl.c       | 3
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 6
-rw-r--r--  drivers/block/drbd/drbd_req.c      | 2
3 files changed, 6 insertions, 5 deletions
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 28ef76bd5230..f20336bc59c8 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -704,9 +704,6 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
 	struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
 	int max_segments = mdev->ldev->dc.max_bio_bvecs;
 
-	if (b->merge_bvec_fn && !mdev->ldev->dc.use_bmbv)
-		max_seg_s = PAGE_SIZE;
-
 	max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
 
 	blk_queue_max_hw_sectors(q, max_seg_s >> 9);
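As a unit check on the hunk above (illustrative numbers only, not taken from the patch): max_seg_s is counted in bytes, queue_max_sectors() in 512-byte sectors, and blk_queue_max_hw_sectors() again takes sectors, which is what the >> 9 converts back to.

/* Illustrative numbers, not from the patch: a backing queue that accepts
 * 64 sectors of 512 bytes clamps a larger request down to 32768 bytes,
 * and >> 9 turns that back into 64 sectors for drbd's own queue. */
unsigned int max_seg_s = 65536;               /* what the caller asked for, in bytes */
unsigned int backing   = 64 * 512;            /* queue_max_sectors(b) * queue_logical_block_size(b) */

if (backing < max_seg_s)
	max_seg_s = backing;                  /* the min(): 32768 bytes */
blk_queue_max_hw_sectors(q, max_seg_s >> 9);  /* 32768 >> 9 == 64 sectors */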
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 388a3e8bb0d0..a04ec01ab3ce 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -3011,7 +3011,11 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
 			ldsc = 1;
 		}
 
-		max_seg_s = be32_to_cpu(p->max_segment_size);
+		if (mdev->agreed_pro_version < 94)
+			max_seg_s = be32_to_cpu(p->max_segment_size);
+		else /* drbd 8.3.8 onwards */
+			max_seg_s = DRBD_MAX_SEGMENT_SIZE;
+
 		if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
 			drbd_setup_queue_param(mdev, max_seg_s);
 
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index d8d9bbfca3b8..343e0e6dd532 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1110,7 +1110,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
 	} else if (limit && get_ldev(mdev)) {
 		struct request_queue * const b =
 			mdev->ldev->backing_bdev->bd_disk->queue;
-		if (b->merge_bvec_fn && mdev->ldev->dc.use_bmbv) {
+		if (b->merge_bvec_fn) {
 			backing_limit = b->merge_bvec_fn(b, bvm, bvec);
 			limit = min(limit, backing_limit);
 		}
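For context on the last hunk (a sketch under assumptions, not code from this patch): drbd_merge_bvec is the per-queue hook the block layer of this kernel generation consults before adding a page to a bio, and with use_bmbv out of the decision it now always relays the question to the backing device's own hook when one is set, so the stricter of the two answers wins. The registration line below shows how drbd of this era is assumed to wire the hook up; treat it as an illustration rather than a quote from the tree.

/* Assumed wiring, for illustration only (not part of this diff):
 * drbd registers its hook on its own request queue ... */
blk_queue_merge_bvec(mdev->rq_queue, drbd_merge_bvec);

/* ... and the block layer then asks drbd_merge_bvec(), for every bio_vec
 * it wants to append, how many bytes drbd will take at that offset; after
 * this patch that answer is always capped by the backing device's own
 * merge_bvec_fn whenever the backing queue provides one. */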