author     Jens Axboe <axboe@suse.de>                 2006-06-13 08:26:10 +0200
committer  Jens Axboe <axboe@nelson.home.kernel.dk>   2006-06-23 17:10:39 +0200
commit     b31dc66a54ad986b6b73bdc49c8efc17cbad1833 (patch)
tree       5591383c1cbffe11512da889c971f899333f1a44 /block
parent     [PATCH] cfq-iosched: Don't set the queue batching limits (diff)
[PATCH] Kill PF_SYNCWRITE flag
A process flag to indicate whether we are doing sync io is incredibly
ugly. It also causes performance problems when a process does a lot of
async io and then proceeds to sync it: part of the io goes out as async
and the rest as sync, creating a disconnect between the previously
submitted io and the synced io. For io schedulers such as CFQ, this
costs us merges and leads to suboptimal scheduling behaviour.

Remove PF_SYNCWRITE completely from the fsync/msync paths, and let
the O_DIRECT path indicate directly that its writes are sync by using
WRITE_SYNC instead.
Signed-off-by: Jens Axboe <axboe@suse.de>
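
For context, a minimal sketch (not part of the patch, assuming the 2.6.17-era
block API with submit_bio(rw, bio), WRITE_SYNC and REQ_RW_SYNC as used in the
diff below; the example_* helpers are illustrative names only) of how the sync
hint now travels with the io itself rather than with the submitting task:

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/fs.h>

    /*
     * Sketch: submitter side. An O_DIRECT-style writer tags its io as
     * synchronous by submitting the bio with WRITE_SYNC (WRITE plus the
     * sync bit) rather than setting PF_SYNCWRITE on current.
     */
    static void example_submit_sync_write(struct bio *bio)
    {
            submit_bio(WRITE_SYNC, bio);    /* bio_sync(bio) now evaluates true */
    }

    /*
     * Sketch: scheduler side. Once init_request_from_bio() (ll_rw_blk.c hunk
     * below) has copied the bio's sync bit into REQ_RW_SYNC, a scheduler can
     * classify the request without looking at the task, as as-iosched now does.
     */
    static inline int example_request_is_sync(struct request *rq)
    {
            return rq_data_dir(rq) == READ || (rq->flags & REQ_RW_SYNC);
    }

The classification thus stays attached to each request, so earlier async writes
and the later sync of the same data are seen in the same context for merging.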
Diffstat (limited to 'block')
-rw-r--r--  block/as-iosched.c   | 2
-rw-r--r--  block/cfq-iosched.c  | 4
-rw-r--r--  block/ll_rw_blk.c    | 3
3 files changed, 5 insertions, 4 deletions
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 9b13d72ffefa..56c99fa037df 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1339,7 +1339,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 	arq->state = AS_RQ_NEW;
 
 	if (rq_data_dir(arq->request) == READ
-			|| current->flags&PF_SYNCWRITE)
+			|| (arq->request->flags & REQ_RW_SYNC))
 		arq->is_sync = 1;
 	else
 		arq->is_sync = 0;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c88f161d3fb3..4c4e9cc3ae26 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -277,8 +277,6 @@ static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsi
 static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key,
 				       struct task_struct *tsk, gfp_t gfp_mask);
-#define process_sync(tsk)	((tsk)->flags & PF_SYNCWRITE)
-
 /*
  * lots of deadline iosched dupes, can be abstracted later...
  */
@@ -334,7 +332,7 @@ static int cfq_queue_empty(request_queue_t *q)
 
 static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
 {
-	if (rw == READ || process_sync(task))
+	if (rw == READ || rw == WRITE_SYNC)
 		return task->pid;
 
 	return CFQ_KEY_ASYNC;
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 17c42ddd31db..2270bb451385 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2827,6 +2827,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
 	if (unlikely(bio_barrier(bio)))
 		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
+	if (bio_sync(bio))
+		req->flags |= REQ_RW_SYNC;
+
 	req->errors = 0;
 	req->hard_sector = req->sector = bio->bi_sector;
 	req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
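
The fs-side half of the change lies outside this 'block'-limited diffstat.
Purely as a hypothetical illustration of what the message describes for the
O_DIRECT path (the actual change is not shown here and may differ), the idea
is to promote the write type before submission instead of toggling a task flag:

    /*
     * Hypothetical helper, not taken from this patch: choose the submission
     * type for a direct-io write so the bio itself carries the sync hint.
     */
    static inline int example_dio_write_type(int rw)
    {
            /* Old scheme: set current->flags |= PF_SYNCWRITE around submission.
             * New scheme: simply submit the write as WRITE_SYNC. */
            return (rw == WRITE) ? WRITE_SYNC : rw;
    }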