author	Tejun Heo <tj@kernel.org>	2009-04-23 04:05:17 +0200
committer	Jens Axboe <jens.axboe@oracle.com>	2009-04-28 07:37:33 +0200
commit	e4025f6c21f1389696c069be2dc647f364925c45 (patch)
tree	4d7ee21062293c9a9d398063c22339b47f581283
parent	block: kill blk_start_queueing() (diff)
download	linux-e4025f6c21f1389696c069be2dc647f364925c45.tar.xz
	linux-e4025f6c21f1389696c069be2dc647f364925c45.zip
block: don't set REQ_NOMERGE unnecessarily
RQ_NOMERGE_FLAGS already defines which REQ flags aren't mergeable; there is no reason to set REQ_NOMERGE superfluously on top of them. It only adds to confusion. Don't set REQ_NOMERGE for barriers and requests with a specific queueing directive. REQ_NOMERGE is now used exclusively by the merging code.

[ Impact: cleanup ]

Signed-off-by: Tejun Heo <tj@kernel.org>
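For context, a minimal sketch of the merge gate this message refers to, as it appears in 2.6.30-era block/blk.h (reproduced from memory; treat the exact flag list as an assumption):

	/* Flags that mark a request non-mergeable.  Both barrier flags are
	 * already listed here, which is why additionally setting
	 * REQ_NOMERGE on barriers was redundant. */
	#define RQ_NOMERGE_FLAGS	\
		(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_HARDBARRIER)

	/* A request may be merged only if none of the above flags are set
	 * and it is a discard or a regular filesystem request. */
	#define rq_mergeable(rq)	\
		(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
		 (blk_discard_rq(rq) || blk_fs_request(rq)))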
-rw-r--r--	block/blk-core.c	5
-rw-r--r--	block/blk-exec.c	1
2 files changed, 1 insertion, 5 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 8b4a0af7d69f..7e0fab53e930 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1082,16 +1082,13 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	if (bio_failfast_driver(bio))
 		req->cmd_flags |= REQ_FAILFAST_DRIVER;
 
-	/*
-	 * REQ_BARRIER implies no merging, but lets make it explicit
-	 */
 	if (unlikely(bio_discard(bio))) {
 		req->cmd_flags |= REQ_DISCARD;
 		if (bio_barrier(bio))
 			req->cmd_flags |= REQ_SOFTBARRIER;
 		req->q->prepare_discard_fn(req->q, req);
 	} else if (unlikely(bio_barrier(bio)))
-		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+		req->cmd_flags |= REQ_HARDBARRIER;
 
 	if (bio_sync(bio))
 		req->cmd_flags |= REQ_RW_SYNC;
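After this hunk, a barrier request carries only REQ_HARDBARRIER (or REQ_SOFTBARRIER in the discard path), and the merge path still rejects it, because both barrier flags are part of RQ_NOMERGE_FLAGS, as in the sketch above.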
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 6af716d1e54e..49557e91f0da 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -51,7 +51,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
 	rq->rq_disk = bd_disk;
-	rq->cmd_flags |= REQ_NOMERGE;
 	rq->end_io = done;
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
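Requests issued through blk_execute_rq_nowait() are passthrough commands (typically REQ_TYPE_BLOCK_PC requests built by SCSI ioctls and the like), not filesystem or discard requests, so rq_mergeable() already evaluates false for them regardless of REQ_NOMERGE; the explicit flag was redundant here as well.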