author    Jens Axboe <axboe@suse.de>                  2005-06-27 10:55:12 +0200
committer Linus Torvalds <torvalds@ppc970.osdl.org>   2005-06-27 23:33:29 +0200
commit    22e2c507c301c3dbbcf91b4948b88f78842ee6c9 (patch)
tree      9a97c91d1362e69703aa286021daffb8a5456f4c /drivers/block/ll_rw_blk.c
parent    Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6 (diff)
[PATCH] Update cfq io scheduler to time sliced design
This updates the CFQ io scheduler to the new time sliced design (cfq v3). It provides full process fairness, while giving excellent aggregate system throughput even for many competing processes. It supports io priorities, either inherited from the cpu nice value or set directly with the ioprio_get/set syscalls. The latter closely mimic set/getpriority.

This import is based on my latest from -mm.

Signed-off-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
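The ioprio_get/ioprio_set interface referenced above is driven from userspace roughly as sketched below. This is a minimal illustration, not part of the patch: it assumes the usual IOPRIO encoding (scheduling class in the bits above IOPRIO_CLASS_SHIFT, a 0-7 level below it) and invokes the raw syscalls through syscall(), since glibc shipped no wrappers at the time; the IOPRIO_* constants are redeclared locally only for the example.

/* Hedged example: put this process in the best-effort class at level 4,
 * then read the value back.  Constants mirror the kernel's ioprio
 * encoding; they are redefined here purely for illustration. */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(c, d)	(((c) << IOPRIO_CLASS_SHIFT) | (d))

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };
enum { IOPRIO_WHO_PROCESS = 1, IOPRIO_WHO_PGRP, IOPRIO_WHO_USER };

int main(void)
{
	int ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);
	long ret;

	/* a "who" of 0 means the calling process, as with setpriority() */
	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, ioprio) == -1) {
		perror("ioprio_set");
		return 1;
	}

	ret = syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0);
	printf("io priority is now class %ld, level %ld\n",
	       ret >> IOPRIO_CLASS_SHIFT, ret & ((1 << IOPRIO_CLASS_SHIFT) - 1));
	return 0;
}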
Diffstat (limited to 'drivers/block/ll_rw_blk.c')
-rw-r--r--  drivers/block/ll_rw_blk.c | 59
1 file changed, 36 insertions(+), 23 deletions(-)
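Several hunks below fold priorities together when a bio is merged into an existing request or two requests are merged (req->ioprio = ioprio_best(req->ioprio, prio)). ioprio_best() itself is implemented outside this file; the sketch below only illustrates the rule those call sites rely on, under the assumption that a lower class number (RT before BE before IDLE) and a lower level within a class are more important, and that an unset IOPRIO_CLASS_NONE value defers to the other side.

/* Illustrative sketch only -- not the kernel's ioprio_best().  It keeps
 * the more important of the two priorities when requests merge. */
#include <stdio.h>

#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_CLASS(p)	((p) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_VALUE(c, d)	(((c) << IOPRIO_CLASS_SHIFT) | (d))

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };

static unsigned short ioprio_best_sketch(unsigned short a, unsigned short b)
{
	unsigned short ac = IOPRIO_PRIO_CLASS(a), bc = IOPRIO_PRIO_CLASS(b);

	if (ac == IOPRIO_CLASS_NONE)	/* unset: defer to the other side */
		return b;
	if (bc == IOPRIO_CLASS_NONE)
		return a;
	if (ac != bc)			/* RT beats BE beats IDLE */
		return ac < bc ? a : b;
	return a < b ? a : b;		/* same class: lower level wins */
}

int main(void)
{
	unsigned short rt = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 7);
	unsigned short be = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 0);

	/* a merged RT + BE request keeps the RT class */
	printf("merged class = %d\n",
	       IOPRIO_PRIO_CLASS(ioprio_best_sketch(rt, be)));
	return 0;
}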
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 60e64091de1b..234fdcfbdf01 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -276,6 +276,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
rq->errors = 0;
rq->rq_status = RQ_ACTIVE;
rq->bio = rq->biotail = NULL;
+ rq->ioprio = 0;
rq->buffer = NULL;
rq->ref_count = 1;
rq->q = q;
@@ -1442,11 +1443,7 @@ void __generic_unplug_device(request_queue_t *q)
if (!blk_remove_plug(q))
return;
- /*
- * was plugged, fire request_fn if queue has stuff to do
- */
- if (elv_next_request(q))
- q->request_fn(q);
+ q->request_fn(q);
}
EXPORT_SYMBOL(__generic_unplug_device);
@@ -1776,8 +1773,8 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq)
mempool_free(rq, q->rq.rq_pool);
}
-static inline struct request *blk_alloc_request(request_queue_t *q, int rw,
- int gfp_mask)
+static inline struct request *
+blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
{
struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
@@ -1790,7 +1787,7 @@ static inline struct request *blk_alloc_request(request_queue_t *q, int rw,
*/
rq->flags = rw;
- if (!elv_set_request(q, rq, gfp_mask))
+ if (!elv_set_request(q, rq, bio, gfp_mask))
return rq;
mempool_free(rq, q->rq.rq_pool);
@@ -1872,7 +1869,8 @@ static void freed_request(request_queue_t *q, int rw)
/*
* Get a free request, queue_lock must not be held
*/
-static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
+ int gfp_mask)
{
struct request *rq = NULL;
struct request_list *rl = &q->rq;
@@ -1895,7 +1893,7 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
}
}
- switch (elv_may_queue(q, rw)) {
+ switch (elv_may_queue(q, rw, bio)) {
case ELV_MQUEUE_NO:
goto rq_starved;
case ELV_MQUEUE_MAY:
@@ -1920,7 +1918,7 @@ get_rq:
set_queue_congested(q, rw);
spin_unlock_irq(q->queue_lock);
- rq = blk_alloc_request(q, rw, gfp_mask);
+ rq = blk_alloc_request(q, rw, bio, gfp_mask);
if (!rq) {
/*
* Allocation failed presumably due to memory. Undo anything
@@ -1961,7 +1959,8 @@ out:
* No available requests for this queue, unplug the device and wait for some
* requests to become available.
*/
-static struct request *get_request_wait(request_queue_t *q, int rw)
+static struct request *get_request_wait(request_queue_t *q, int rw,
+ struct bio *bio)
{
DEFINE_WAIT(wait);
struct request *rq;
@@ -1972,7 +1971,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw)
prepare_to_wait_exclusive(&rl->wait[rw], &wait,
TASK_UNINTERRUPTIBLE);
- rq = get_request(q, rw, GFP_NOIO);
+ rq = get_request(q, rw, bio, GFP_NOIO);
if (!rq) {
struct io_context *ioc;
@@ -2003,9 +2002,9 @@ struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
BUG_ON(rw != READ && rw != WRITE);
if (gfp_mask & __GFP_WAIT)
- rq = get_request_wait(q, rw);
+ rq = get_request_wait(q, rw, NULL);
else
- rq = get_request(q, rw, gfp_mask);
+ rq = get_request(q, rw, NULL, gfp_mask);
return rq;
}
@@ -2333,7 +2332,6 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
return;
req->rq_status = RQ_INACTIVE;
- req->q = NULL;
req->rl = NULL;
/*
@@ -2462,6 +2460,8 @@ static int attempt_merge(request_queue_t *q, struct request *req,
req->rq_disk->in_flight--;
}
+ req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+
__blk_put_request(q, next);
return 1;
}
@@ -2514,11 +2514,13 @@ static int __make_request(request_queue_t *q, struct bio *bio)
{
struct request *req, *freereq = NULL;
int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
+ unsigned short prio;
sector_t sector;
sector = bio->bi_sector;
nr_sectors = bio_sectors(bio);
cur_nr_sectors = bio_cur_sectors(bio);
+ prio = bio_prio(bio);
rw = bio_data_dir(bio);
sync = bio_sync(bio);
@@ -2559,6 +2561,7 @@ again:
req->biotail->bi_next = bio;
req->biotail = bio;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+ req->ioprio = ioprio_best(req->ioprio, prio);
drive_stat_acct(req, nr_sectors, 0);
if (!attempt_back_merge(q, req))
elv_merged_request(q, req);
@@ -2583,6 +2586,7 @@ again:
req->hard_cur_sectors = cur_nr_sectors;
req->sector = req->hard_sector = sector;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+ req->ioprio = ioprio_best(req->ioprio, prio);
drive_stat_acct(req, nr_sectors, 0);
if (!attempt_front_merge(q, req))
elv_merged_request(q, req);
@@ -2610,7 +2614,7 @@ get_rq:
freereq = NULL;
} else {
spin_unlock_irq(q->queue_lock);
- if ((freereq = get_request(q, rw, GFP_ATOMIC)) == NULL) {
+ if ((freereq = get_request(q, rw, bio, GFP_ATOMIC)) == NULL) {
/*
* READA bit set
*/
@@ -2618,7 +2622,7 @@ get_rq:
if (bio_rw_ahead(bio))
goto end_io;
- freereq = get_request_wait(q, rw);
+ freereq = get_request_wait(q, rw, bio);
}
goto again;
}
@@ -2646,6 +2650,7 @@ get_rq:
req->buffer = bio_data(bio); /* see ->buffer comment above */
req->waiting = NULL;
req->bio = req->biotail = bio;
+ req->ioprio = prio;
req->rq_disk = bio->bi_bdev->bd_disk;
req->start_time = jiffies;
@@ -2674,7 +2679,7 @@ static inline void blk_partition_remap(struct bio *bio)
if (bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part;
- switch (bio->bi_rw) {
+ switch (bio_data_dir(bio)) {
case READ:
p->read_sectors += bio_sectors(bio);
p->reads++;
@@ -2693,6 +2698,7 @@ void blk_finish_queue_drain(request_queue_t *q)
{
struct request_list *rl = &q->rq;
struct request *rq;
+ int requeued = 0;
spin_lock_irq(q->queue_lock);
clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
@@ -2701,9 +2707,13 @@ void blk_finish_queue_drain(request_queue_t *q)
rq = list_entry_rq(q->drain_list.next);
list_del_init(&rq->queuelist);
- __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
+ elv_requeue_request(q, rq);
+ requeued++;
}
+ if (requeued)
+ q->request_fn(q);
+
spin_unlock_irq(q->queue_lock);
wake_up(&rl->wait[0]);
@@ -2900,7 +2910,7 @@ void submit_bio(int rw, struct bio *bio)
BIO_BUG_ON(!bio->bi_size);
BIO_BUG_ON(!bio->bi_io_vec);
- bio->bi_rw = rw;
+ bio->bi_rw |= rw;
if (rw & WRITE)
mod_page_state(pgpgout, count);
else
@@ -3257,8 +3267,11 @@ void exit_io_context(void)
struct io_context *ioc;
local_irq_save(flags);
+ task_lock(current);
ioc = current->io_context;
current->io_context = NULL;
+ ioc->task = NULL;
+ task_unlock(current);
local_irq_restore(flags);
if (ioc->aic && ioc->aic->exit)
@@ -3293,12 +3306,12 @@ struct io_context *get_io_context(int gfp_flags)
ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
if (ret) {
atomic_set(&ret->refcount, 1);
- ret->pid = tsk->pid;
+ ret->task = current;
+ ret->set_ioprio = NULL;
ret->last_waited = jiffies; /* doesn't matter... */
ret->nr_batch_requests = 0; /* because this is 0 */
ret->aic = NULL;
ret->cic = NULL;
- spin_lock_init(&ret->lock);
local_irq_save(flags);