author      Jens Axboe <axboe@suse.de>                    2006-08-10 08:44:47 +0200
committer   Jens Axboe <axboe@nelson.home.kernel.dk>      2006-09-30 20:23:37 +0200
commit      4aff5e2333c9a1609662f2091f55c3f6fffdad36 (patch)
tree        b73d8c2b7c1bdc03d3313c108da7dfc95ee95525 /block
parent      [PATCH] i2c: Prevent deadlock on i2c client registration (diff)
download    linux-4aff5e2333c9a1609662f2091f55c3f6fffdad36.tar.xz
            linux-4aff5e2333c9a1609662f2091f55c3f6fffdad36.zip
[PATCH] Split struct request ->flags into two parts
Right now ->flags is a bit of a mess: some bits are request types, and
others are just modifiers. Clean this up by splitting it into
->cmd_type and ->cmd_flags. This also allows the introduction of
generic Linux block message types, useful for sending generic Linux
commands to block devices.
Signed-off-by: Jens Axboe <axboe@suse.de>
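Editor's note: the other half of this change lives in include/linux/blkdev.h and is outside the 'block' diffstat shown below. The sketch here illustrates the shape of the split; the identifiers REQ_TYPE_FS, REQ_TYPE_BLOCK_PC, REQ_TYPE_SPECIAL, blk_fs_request(), blk_pc_request() and the REQ_* modifier bits all appear in the diff, but the exact enum layout, bit positions and field order below are assumptions, not the verbatim header change.

```c
/*
 * Sketch of the new fields; the real definitions live in
 * include/linux/blkdev.h, outside this diffstat. Enum layout,
 * bit positions and field order are illustrative.
 */

/* What a request *is* -- exactly one value per request. */
enum rq_cmd_type_bits {
	REQ_TYPE_FS = 1,	/* filesystem request (was REQ_CMD) */
	REQ_TYPE_BLOCK_PC,	/* SCSI passthrough (was REQ_BLOCK_PC) */
	REQ_TYPE_SPECIAL,	/* driver-defined (was REQ_SPECIAL) */
	/* room for the generic block message types the changelog mentions */
};

/* How a request behaves -- any combination of modifier bits. */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read; set, write */
	__REQ_FAILFAST,		/* no low-level driver retries */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ordered dispatch */
	/* ... remaining modifier bits elided ... */
};
#define REQ_RW		(1 << __REQ_RW)
#define REQ_FAILFAST	(1 << __REQ_FAILFAST)

struct request {
	/* ... */
	unsigned int cmd_flags;		/* REQ_* modifier bits */
	enum rq_cmd_type_bits cmd_type;	/* request type */
	/* ... */
};

/* Type checks become equality tests instead of bit tests. */
#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
```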
Diffstat (limited to 'block')
-rw-r--r--   block/as-iosched.c |   2
-rw-r--r--   block/elevator.c   |  26
-rw-r--r--   block/ll_rw_blk.c  | 101
-rw-r--r--   block/scsi_ioctl.c |   6
4 files changed, 52 insertions(+), 83 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 5da56d48fbd3..ad1cc4077819 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1335,7 +1335,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 	arq->state = AS_RQ_NEW;
 
 	if (rq_data_dir(arq->request) == READ
-			|| (arq->request->flags & REQ_RW_SYNC))
+			|| (arq->request->cmd_flags & REQ_RW_SYNC))
 		arq->is_sync = 1;
 	else
 		arq->is_sync = 0;
diff --git a/block/elevator.c b/block/elevator.c
index 9b72dc7c8a5c..4ac97b642042 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -242,7 +242,7 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
-		if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+		if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
 			break;
 		if (rq->sector >= boundary) {
 			if (pos->sector < boundary)
@@ -313,7 +313,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 			e->ops->elevator_deactivate_req_fn(q, rq);
 	}
 
-	rq->flags &= ~REQ_STARTED;
+	rq->cmd_flags &= ~REQ_STARTED;
 
 	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
@@ -344,13 +344,13 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 
 	switch (where) {
 	case ELEVATOR_INSERT_FRONT:
-		rq->flags |= REQ_SOFTBARRIER;
+		rq->cmd_flags |= REQ_SOFTBARRIER;
 
 		list_add(&rq->queuelist, &q->queue_head);
 		break;
 
 	case ELEVATOR_INSERT_BACK:
-		rq->flags |= REQ_SOFTBARRIER;
+		rq->cmd_flags |= REQ_SOFTBARRIER;
 		elv_drain_elevator(q);
 		list_add_tail(&rq->queuelist, &q->queue_head);
 		/*
@@ -369,7 +369,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 
 	case ELEVATOR_INSERT_SORT:
 		BUG_ON(!blk_fs_request(rq));
-		rq->flags |= REQ_SORTED;
+		rq->cmd_flags |= REQ_SORTED;
 		q->nr_sorted++;
 		if (q->last_merge == NULL && rq_mergeable(rq))
 			q->last_merge = rq;
@@ -387,7 +387,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		 * insertion; otherwise, requests should be requeued
 		 * in ordseq order.
 		 */
-		rq->flags |= REQ_SOFTBARRIER;
+		rq->cmd_flags |= REQ_SOFTBARRIER;
 
 		if (q->ordseq == 0) {
 			list_add(&rq->queuelist, &q->queue_head);
@@ -429,9 +429,9 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 		       int plug)
 {
 	if (q->ordcolor)
-		rq->flags |= REQ_ORDERED_COLOR;
+		rq->cmd_flags |= REQ_ORDERED_COLOR;
 
-	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
 		/*
 		 * toggle ordered color
 		 */
@@ -452,7 +452,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = rq;
 		}
-	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+	} else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
 		where = ELEVATOR_INSERT_BACK;
 
 	if (plug)
@@ -493,7 +493,7 @@ struct request *elv_next_request(request_queue_t *q)
 	int ret;
 
 	while ((rq = __elv_next_request(q)) != NULL) {
-		if (!(rq->flags & REQ_STARTED)) {
+		if (!(rq->cmd_flags & REQ_STARTED)) {
 			elevator_t *e = q->elevator;
 
 			/*
@@ -510,7 +510,7 @@ struct request *elv_next_request(request_queue_t *q)
 			 * it, a request that has been delayed should
 			 * not be passed by new incoming requests
 			 */
-			rq->flags |= REQ_STARTED;
+			rq->cmd_flags |= REQ_STARTED;
 			blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
 		}
 
@@ -519,7 +519,7 @@ struct request *elv_next_request(request_queue_t *q)
 			q->boundary_rq = NULL;
 		}
 
-		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
+		if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
 			break;
 
 		ret = q->prep_rq_fn(q, rq);
@@ -541,7 +541,7 @@ struct request *elv_next_request(request_queue_t *q)
 				nr_bytes = rq->data_len;
 
 			blkdev_dequeue_request(rq);
-			rq->flags |= REQ_QUIET;
+			rq->cmd_flags |= REQ_QUIET;
 			end_that_request_chunk(rq, 0, nr_bytes);
 			end_that_request_last(rq, 0);
 		} else {
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 51dc0edf76e0..9b91bb70c5ed 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -382,8 +382,8 @@ unsigned blk_ordered_req_seq(struct request *rq)
 	if (rq == &q->post_flush_rq)
 		return QUEUE_ORDSEQ_POSTFLUSH;
 
-	if ((rq->flags & REQ_ORDERED_COLOR) ==
-	    (q->orig_bar_rq->flags & REQ_ORDERED_COLOR))
+	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
+	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
 		return QUEUE_ORDSEQ_DRAIN;
 	else
 		return QUEUE_ORDSEQ_DONE;
@@ -446,8 +446,8 @@ static void queue_flush(request_queue_t *q, unsigned which)
 		end_io = post_flush_end_io;
 	}
 
+	rq->cmd_flags = REQ_HARDBARRIER;
 	rq_init(q, rq);
-	rq->flags = REQ_HARDBARRIER;
 	rq->elevator_private = NULL;
 	rq->rq_disk = q->bar_rq.rq_disk;
 	rq->rl = NULL;
@@ -471,9 +471,11 @@ static inline struct request *start_ordered(request_queue_t *q,
 	blkdev_dequeue_request(rq);
 	q->orig_bar_rq = rq;
 	rq = &q->bar_rq;
+	rq->cmd_flags = 0;
 	rq_init(q, rq);
-	rq->flags = bio_data_dir(q->orig_bar_rq->bio);
-	rq->flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+		rq->cmd_flags |= REQ_RW;
+	rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
 	rq->elevator_private = NULL;
 	rq->rl = NULL;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
@@ -1124,7 +1126,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 	}
 
 	list_del_init(&rq->queuelist);
-	rq->flags &= ~REQ_QUEUED;
+	rq->cmd_flags &= ~REQ_QUEUED;
 	rq->tag = -1;
 
 	if (unlikely(bqt->tag_index[tag] == NULL))
@@ -1160,7 +1162,7 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq)
 	struct blk_queue_tag *bqt = q->queue_tags;
 	int tag;
 
-	if (unlikely((rq->flags & REQ_QUEUED))) {
+	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
 		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
 		       __FUNCTION__, rq,
@@ -1174,7 +1176,7 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq)
 
 	__set_bit(tag, bqt->tag_map);
 
-	rq->flags |= REQ_QUEUED;
+	rq->cmd_flags |= REQ_QUEUED;
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
 	blkdev_dequeue_request(rq);
@@ -1210,65 +1212,31 @@ void blk_queue_invalidate_tags(request_queue_t *q)
 			printk(KERN_ERR
 			       "%s: bad tag found on list\n", __FUNCTION__);
 			list_del_init(&rq->queuelist);
-			rq->flags &= ~REQ_QUEUED;
+			rq->cmd_flags &= ~REQ_QUEUED;
 		} else
 			blk_queue_end_tag(q, rq);
 
-		rq->flags &= ~REQ_STARTED;
+		rq->cmd_flags &= ~REQ_STARTED;
 		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
 	}
 }
 
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
 
-static const char * const rq_flags[] = {
-	"REQ_RW",
-	"REQ_FAILFAST",
-	"REQ_SORTED",
-	"REQ_SOFTBARRIER",
-	"REQ_HARDBARRIER",
-	"REQ_FUA",
-	"REQ_CMD",
-	"REQ_NOMERGE",
-	"REQ_STARTED",
-	"REQ_DONTPREP",
-	"REQ_QUEUED",
-	"REQ_ELVPRIV",
-	"REQ_PC",
-	"REQ_BLOCK_PC",
-	"REQ_SENSE",
-	"REQ_FAILED",
-	"REQ_QUIET",
-	"REQ_SPECIAL",
-	"REQ_DRIVE_CMD",
-	"REQ_DRIVE_TASK",
-	"REQ_DRIVE_TASKFILE",
-	"REQ_PREEMPT",
-	"REQ_PM_SUSPEND",
-	"REQ_PM_RESUME",
-	"REQ_PM_SHUTDOWN",
-	"REQ_ORDERED_COLOR",
-};
-
 void blk_dump_rq_flags(struct request *rq, char *msg)
 {
 	int bit;
 
-	printk("%s: dev %s: flags = ", msg,
-		rq->rq_disk ? rq->rq_disk->disk_name : "?");
-	bit = 0;
-	do {
-		if (rq->flags & (1 << bit))
-			printk("%s ", rq_flags[bit]);
-		bit++;
-	} while (bit < __REQ_NR_BITS);
+	printk("%s: dev %s: type=%x, flags=%x\n", msg,
+		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
+		rq->cmd_flags);
 
 	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
 						       rq->nr_sectors,
 						       rq->current_nr_sectors);
 	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
 
-	if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
+	if (blk_pc_request(rq)) {
 		printk("cdb: ");
 		for (bit = 0; bit < sizeof(rq->cmd); bit++)
 			printk("%02x ", rq->cmd[bit]);
@@ -1441,7 +1409,7 @@ static inline int ll_new_mergeable(request_queue_t *q,
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
 	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -1464,7 +1432,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 
 	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
 	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -1491,7 +1459,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 		max_sectors = q->max_sectors;
 
 	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -1530,7 +1498,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 
 
 	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -2029,7 +1997,7 @@ EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(request_queue_t *q, struct request *rq)
 {
-	if (rq->flags & REQ_ELVPRIV)
+	if (rq->cmd_flags & REQ_ELVPRIV)
 		elv_put_request(q, rq);
 	mempool_free(rq, q->rq.rq_pool);
 }
@@ -2044,17 +2012,17 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
 		return NULL;
 
 	/*
-	 * first three bits are identical in rq->flags and bio->bi_rw,
+	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
	 * see bio.h and blkdev.h
 	 */
-	rq->flags = rw;
+	rq->cmd_flags = rw;
 
 	if (priv) {
 		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
 			mempool_free(rq, q->rq.rq_pool);
 			return NULL;
 		}
-		rq->flags |= REQ_ELVPRIV;
+		rq->cmd_flags |= REQ_ELVPRIV;
 	}
 
 	return rq;
@@ -2351,7 +2319,8 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
 	 * must not attempt merges on this) and that it acts as a soft
 	 * barrier
 	 */
-	rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER;
+	rq->cmd_type = REQ_TYPE_SPECIAL;
+	rq->cmd_flags |= REQ_SOFTBARRIER;
 
 	rq->special = data;
 
@@ -2558,7 +2527,7 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
 	rq->rq_disk = bd_disk;
-	rq->flags |= REQ_NOMERGE;
+	rq->cmd_flags |= REQ_NOMERGE;
 	rq->end_io = done;
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
@@ -2728,7 +2697,7 @@ void __blk_put_request(request_queue_t *q, struct request *req)
 	 */
 	if (rl) {
 		int rw = rq_data_dir(req);
-		int priv = req->flags & REQ_ELVPRIV;
+		int priv = req->cmd_flags & REQ_ELVPRIV;
 
 		BUG_ON(!list_empty(&req->queuelist));
 
@@ -2890,22 +2859,22 @@ static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
 
 static void init_request_from_bio(struct request *req, struct bio *bio)
 {
-	req->flags |= REQ_CMD;
+	req->cmd_type = REQ_TYPE_FS;
 
 	/*
 	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
 	 */
 	if (bio_rw_ahead(bio) || bio_failfast(bio))
-		req->flags |= REQ_FAILFAST;
+		req->cmd_flags |= REQ_FAILFAST;
 
 	/*
 	 * REQ_BARRIER implies no merging, but lets make it explicit
 	 */
 	if (unlikely(bio_barrier(bio)))
-		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
 	if (bio_sync(bio))
-		req->flags |= REQ_RW_SYNC;
+		req->cmd_flags |= REQ_RW_SYNC;
 
 	req->errors = 0;
 	req->hard_sector = req->sector = bio->bi_sector;
@@ -3306,7 +3275,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
 		req->errors = 0;
 
 	if (!uptodate) {
-		if (blk_fs_request(req) && !(req->flags & REQ_QUIET))
+		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
 			printk("end_request: I/O error, dev %s, sector %llu\n",
 				req->rq_disk ? req->rq_disk->disk_name : "?",
 				(unsigned long long)req->sector);
@@ -3569,8 +3538,8 @@ EXPORT_SYMBOL(end_request);
 
 void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
 {
-	/* first two bits are identical in rq->flags and bio->bi_rw */
-	rq->flags |= (bio->bi_rw & 3);
+	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
+	rq->cmd_flags |= (bio->bi_rw & 3);
 
 	rq->nr_phys_segments = bio_phys_segments(q, bio);
 	rq->nr_hw_segments = bio_hw_segments(q, bio);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index b33eda26e205..2dc326421a24 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -294,7 +294,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 	rq->sense = sense;
 	rq->sense_len = 0;
 
-	rq->flags |= REQ_BLOCK_PC;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	bio = rq->bio;
 
 	/*
@@ -470,7 +470,7 @@ int sg_scsi_ioctl(struct file *file, struct request_queue *q,
 	memset(sense, 0, sizeof(sense));
 	rq->sense = sense;
 	rq->sense_len = 0;
-	rq->flags |= REQ_BLOCK_PC;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 
 	blk_execute_rq(q, disk, rq, 0);
 
@@ -502,7 +502,7 @@ static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int c
 	int err;
 
 	rq = blk_get_request(q, WRITE, __GFP_WAIT);
-	rq->flags |= REQ_BLOCK_PC;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->data = NULL;
 	rq->data_len = 0;
 	rq->timeout = BLK_DEFAULT_TIMEOUT;
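Editor's note: for driver code outside block/, the conversion is the same mechanical one the hunks above apply: type checks move from flag tests to ->cmd_type (or the blk_*_request() helpers), while modifier bits move to ->cmd_flags. A hedged sketch against the era's prep_rq_fn hook; my_driver_prep() is hypothetical, and only the type, flag and helper names come from the patch:

```c
#include <linux/blkdev.h>

/* Hypothetical prep_rq_fn illustrating the conversion; not part of the patch. */
static int my_driver_prep(request_queue_t *q, struct request *rq)
{
	/* old: if (!(rq->flags & REQ_CMD)) -- the type now lives in ->cmd_type */
	if (!blk_fs_request(rq))
		return BLKPREP_KILL;

	/* old: rq->flags |= REQ_NOMERGE; -- modifiers move to ->cmd_flags */
	rq->cmd_flags |= REQ_NOMERGE;
	return BLKPREP_OK;
}
```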