summaryrefslogtreecommitdiffstats
path: root/drivers/block/xd.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2009-05-08 04:54:10 +0200
committerJens Axboe <jens.axboe@oracle.com>2009-05-11 09:52:16 +0200
commitbab2a807a489822ded0c9d4a5344c80bcac10b0a (patch)
treecf93330ae12f820d1dd53bc9d484fa6cad38962c /drivers/block/xd.c
parentswim: dequeue in-flight request (diff)
downloadlinux-bab2a807a489822ded0c9d4a5344c80bcac10b0a.tar.xz
linux-bab2a807a489822ded0c9d4a5344c80bcac10b0a.zip
xd: dequeue in-flight request
xd processes requests one-by-one synchronously and can be easily converted to dequeueing model. Convert it. While at it, use rq_cur_bytes instead of rq_bytes when checking for sector overflow. This is for consistency and better behavior for merged requests. [ Impact: dequeue in-flight request ] Signed-off-by: Tejun Heo <tj@kernel.org> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'drivers/block/xd.c')
-rw-r--r--drivers/block/xd.c29
1 files changed, 17 insertions, 12 deletions
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 4ef88018bcde..d4c4352354b5 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -305,26 +305,31 @@ static void do_xd_request (struct request_queue * q)
if (xdc_busy)
return;
- while ((req = elv_next_request(q)) != NULL) {
+ req = elv_next_request(q);
+ if (req)
+ blkdev_dequeue_request(req);
+
+ while (req) {
unsigned block = blk_rq_pos(req);
- unsigned count = blk_rq_sectors(req);
+ unsigned count = blk_rq_cur_sectors(req);
XD_INFO *disk = req->rq_disk->private_data;
- int res = 0;
+ int res = -EIO;
int retry;
- if (!blk_fs_request(req)) {
- __blk_end_request_cur(req, -EIO);
- continue;
- }
- if (block + count > get_capacity(req->rq_disk)) {
- __blk_end_request_cur(req, -EIO);
- continue;
- }
+ if (!blk_fs_request(req))
+ goto done;
+ if (block + count > get_capacity(req->rq_disk))
+ goto done;
for (retry = 0; (retry < XD_RETRIES) && !res; retry++)
res = xd_readwrite(rq_data_dir(req), disk, req->buffer,
block, count);
+ done:
/* wrap up, 0 = success, -errno = fail */
- __blk_end_request_cur(req, res);
+ if (!__blk_end_request_cur(req, res)) {
+ req = elv_next_request(q);
+ if (req)
+ blkdev_dequeue_request(req);
+ }
}
}