84 files changed, 529 insertions, 509 deletions
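Every hunk below makes the same mechanical substitution: the request_queue_t typedef is spelled out as struct request_queue, with declarations re-wrapped where the longer name would overflow the line. A minimal before/after sketch of the pattern (mydev_request_fn is a hypothetical driver function, not taken from the patch):

	/* before this series: the typedef */
	static void mydev_request_fn(request_queue_t *q);

	/* after this series: the struct is named explicitly */
	static void mydev_request_fn(struct request_queue *q);

The change is purely a spelling conversion; no layout or behaviour change is intended.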
diff --git a/Documentation/block/barrier.txt b/Documentation/block/barrier.txt
index 7d279f2f5bb2..2c2f24f634e4 100644
--- a/Documentation/block/barrier.txt
+++ b/Documentation/block/barrier.txt
@@ -79,9 +79,9 @@ and how to prepare flush requests. Note that the term 'ordered' is
 used to indicate the whole sequence of performing barrier requests
 including draining and flushing.
 
-typedef void (prepare_flush_fn)(request_queue_t *q, struct request *rq);
+typedef void (prepare_flush_fn)(struct request_queue *q, struct request *rq);
 
-int blk_queue_ordered(request_queue_t *q, unsigned ordered,
+int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 		      prepare_flush_fn *prepare_flush_fn);
 
 @q			: the queue in question
@@ -92,7 +92,7 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered,
 For example, SCSI disk driver's prepare_flush_fn looks like the following.
 
-static void sd_prepare_flush(request_queue_t *q, struct request *rq)
+static void sd_prepare_flush(struct request_queue *q, struct request *rq)
 {
 	memset(rq->cmd, 0, sizeof(rq->cmd));
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 3adaace328a6..8af392fc6ef0 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -740,12 +740,12 @@ Block now offers some simple generic functionality to help support command
 queueing (typically known as tagged command queueing), ie manage more than
 one outstanding command on a queue at any given time.
 
-	blk_queue_init_tags(request_queue_t *q, int depth)
+	blk_queue_init_tags(struct request_queue *q, int depth)
 
 	Initialize internal command tagging structures for a maximum
 	depth of 'depth'.
 
-	blk_queue_free_tags((request_queue_t *q)
+	blk_queue_free_tags((struct request_queue *q)
 
 	Teardown tag info associated with the queue. This will be done
 	automatically by block if blk_queue_cleanup() is called on a queue
@@ -754,7 +754,7 @@ one outstanding command on a queue at any given time.
 The above are initialization and exit management, the main helpers during
 normal operations are:
 
-	blk_queue_start_tag(request_queue_t *q, struct request *rq)
+	blk_queue_start_tag(struct request_queue *q, struct request *rq)
 
 	Start tagged operation for this request. A free tag number between
 	0 and 'depth' is assigned to the request (rq->tag holds this number),
@@ -762,7 +762,7 @@ normal operations are:
 	for this queue is already achieved (or if the tag wasn't started for
 	some other reason), 1 is returned. Otherwise 0 is returned.
 
-	blk_queue_end_tag(request_queue_t *q, struct request *rq)
+	blk_queue_end_tag(struct request_queue *q, struct request *rq)
 
 	End tagged operation on this request. 'rq' is removed from the
 	internal book keeping structures.
@@ -781,7 +781,7 @@ queue. For instance, on IDE any tagged request error needs to clear both
 the hardware and software block queue and enable the driver to sanely restart
 all the outstanding requests. There's a third helper to do that:
 
-	blk_queue_invalidate_tags(request_queue_t *q)
+	blk_queue_invalidate_tags(struct request_queue *q)
 
 	Clear the internal block tag queue and re-add all the pending
 	requests to the request queue. The driver will receive them again on the
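The tag helpers documented above change only in spelling, so a driver's use of them is unaffected. As a rough sketch of how a request_fn might drive this API with the new signature (mydev_request_fn and mydev_issue are hypothetical names, and error handling is elided):

	static void mydev_request_fn(struct request_queue *q)
	{
		struct request *rq;

		/* called with the queue lock held */
		while ((rq = elv_next_request(q)) != NULL) {
			/* assigns rq->tag; nonzero means the tag depth is
			 * exhausted, so retry on a later queue run */
			if (blk_queue_start_tag(q, rq))
				break;
			mydev_issue(rq);	/* hypothetical hardware submit */
		}
	}

On completion the driver would call blk_queue_end_tag(q, rq), again with the queue lock held, or fall back to blk_queue_invalidate_tags(q) after an error to requeue everything outstanding.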
diff --git a/Documentation/block/request.txt b/Documentation/block/request.txt
index 75924e2a6975..fff58acb40a3 100644
--- a/Documentation/block/request.txt
+++ b/Documentation/block/request.txt
@@ -83,6 +83,6 @@ struct bio *bio		DBI	First bio in request
 
 struct bio *biotail	DBI	Last bio in request
 
-request_queue_t *q	DB	Request queue this request belongs to
+struct request_queue *q	DB	Request queue this request belongs to
 
 struct request_list *rl	B	Request list this request came from
diff --git a/Documentation/iostats.txt b/Documentation/iostats.txt
index 09a1bafe2528..b963c3b4afa5 100644
--- a/Documentation/iostats.txt
+++ b/Documentation/iostats.txt
@@ -79,7 +79,7 @@ Field  8 -- # of milliseconds spent writing
     measured from __make_request() to end_that_request_last()).
 Field  9 -- # of I/Os currently in progress
     The only field that should go to zero. Incremented as requests are
-    given to appropriate request_queue_t and decremented as they finish.
+    given to appropriate struct request_queue and decremented as they finish.
 Field 10 -- # of milliseconds spent doing I/Os
     This field is increases so long as field 9 is nonzero.
 Field 11 -- weighted # of milliseconds spent doing I/Os
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index de7e6ef48bd0..0360b1f14d11 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -161,11 +161,11 @@ static void mbox_rx_work(struct work_struct *work)
 /*
  * Mailbox interrupt handler
  */
-static void mbox_txq_fn(request_queue_t * q)
+static void mbox_txq_fn(struct request_queue * q)
 {
 }
 
-static void mbox_rxq_fn(request_queue_t * q)
+static void mbox_rxq_fn(struct request_queue * q)
 {
 }
 
@@ -180,7 +180,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
 {
 	struct request *rq;
 	mbox_msg_t msg;
-	request_queue_t *q = mbox->rxq->queue;
+	struct request_queue *q = mbox->rxq->queue;
 
 	disable_mbox_irq(mbox, IRQ_RX);
 
@@ -297,7 +297,7 @@ static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
 					request_fn_proc * proc,
 					void (*work) (struct work_struct *))
 {
-	request_queue_t *q;
+	struct request_queue *q;
 	struct omap_mbox_queue *mq;
 
 	mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index fc27f6c72b41..aff661fe2ee1 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -469,7 +469,7 @@ __uml_help(fakehd,
 "    Change the ubd device name to \"hd\".\n\n"
 );
 
-static void do_ubd_request(request_queue_t * q);
+static void do_ubd_request(struct request_queue * q);
 
 /* Only changed by ubd_init, which is an initcall. */
 int thread_fd = -1;
@@ -1081,7 +1081,7 @@ static void prepare_request(struct request *req, struct io_thread_req *io_req,
 }
 
 /* Called with dev->lock held */
-static void do_ubd_request(request_queue_t *q)
+static void do_ubd_request(struct request_queue *q)
 {
 	struct io_thread_req *io_req;
 	struct request *req;
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 3e316dd72529..dc715a562e14 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -796,7 +796,7 @@ static void update_write_batch(struct as_data *ad)
 * as_completed_request is to be called when a request has completed and
 * returned something to the requesting process, be it an error or data.
*/ -static void as_completed_request(request_queue_t *q, struct request *rq) +static void as_completed_request(struct request_queue *q, struct request *rq) { struct as_data *ad = q->elevator->elevator_data; @@ -853,7 +853,8 @@ out: * reference unless it replaces the request at somepart of the elevator * (ie. the dispatch queue) */ -static void as_remove_queued_request(request_queue_t *q, struct request *rq) +static void as_remove_queued_request(struct request_queue *q, + struct request *rq) { const int data_dir = rq_is_sync(rq); struct as_data *ad = q->elevator->elevator_data; @@ -978,7 +979,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq) * read/write expire, batch expire, etc, and moves it to the dispatch * queue. Returns 1 if a request was found, 0 otherwise. */ -static int as_dispatch_request(request_queue_t *q, int force) +static int as_dispatch_request(struct request_queue *q, int force) { struct as_data *ad = q->elevator->elevator_data; const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]); @@ -1139,7 +1140,7 @@ fifo_expired: /* * add rq to rbtree and fifo */ -static void as_add_request(request_queue_t *q, struct request *rq) +static void as_add_request(struct request_queue *q, struct request *rq) { struct as_data *ad = q->elevator->elevator_data; int data_dir; @@ -1167,7 +1168,7 @@ static void as_add_request(request_queue_t *q, struct request *rq) RQ_SET_STATE(rq, AS_RQ_QUEUED); } -static void as_activate_request(request_queue_t *q, struct request *rq) +static void as_activate_request(struct request_queue *q, struct request *rq) { WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED); RQ_SET_STATE(rq, AS_RQ_REMOVED); @@ -1175,7 +1176,7 @@ static void as_activate_request(request_queue_t *q, struct request *rq) atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched); } -static void as_deactivate_request(request_queue_t *q, struct request *rq) +static void as_deactivate_request(struct request_queue *q, struct request *rq) { WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED); RQ_SET_STATE(rq, AS_RQ_DISPATCHED); @@ -1189,7 +1190,7 @@ static void as_deactivate_request(request_queue_t *q, struct request *rq) * is not empty - it is used in the block layer to check for plugging and * merging opportunities */ -static int as_queue_empty(request_queue_t *q) +static int as_queue_empty(struct request_queue *q) { struct as_data *ad = q->elevator->elevator_data; @@ -1198,7 +1199,7 @@ static int as_queue_empty(request_queue_t *q) } static int -as_merge(request_queue_t *q, struct request **req, struct bio *bio) +as_merge(struct request_queue *q, struct request **req, struct bio *bio) { struct as_data *ad = q->elevator->elevator_data; sector_t rb_key = bio->bi_sector + bio_sectors(bio); @@ -1216,7 +1217,8 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio) return ELEVATOR_NO_MERGE; } -static void as_merged_request(request_queue_t *q, struct request *req, int type) +static void as_merged_request(struct request_queue *q, struct request *req, + int type) { struct as_data *ad = q->elevator->elevator_data; @@ -1234,7 +1236,7 @@ static void as_merged_request(request_queue_t *q, struct request *req, int type) } } -static void as_merged_requests(request_queue_t *q, struct request *req, +static void as_merged_requests(struct request_queue *q, struct request *req, struct request *next) { /* @@ -1285,7 +1287,7 @@ static void as_work_handler(struct work_struct *work) spin_unlock_irqrestore(q->queue_lock, flags); } -static int as_may_queue(request_queue_t *q, int rw) +static int as_may_queue(struct 
request_queue *q, int rw) { int ret = ELV_MQUEUE_MAY; struct as_data *ad = q->elevator->elevator_data; @@ -1318,7 +1320,7 @@ static void as_exit_queue(elevator_t *e) /* * initialize elevator private data (as_data). */ -static void *as_init_queue(request_queue_t *q) +static void *as_init_queue(struct request_queue *q) { struct as_data *ad; diff --git a/block/blktrace.c b/block/blktrace.c index 3f0e7c37c059..20c3e22587b5 100644 --- a/block/blktrace.c +++ b/block/blktrace.c @@ -231,7 +231,7 @@ static void blk_trace_cleanup(struct blk_trace *bt) kfree(bt); } -static int blk_trace_remove(request_queue_t *q) +static int blk_trace_remove(struct request_queue *q) { struct blk_trace *bt; @@ -312,7 +312,7 @@ static struct rchan_callbacks blk_relay_callbacks = { /* * Setup everything required to start tracing */ -static int blk_trace_setup(request_queue_t *q, struct block_device *bdev, +static int blk_trace_setup(struct request_queue *q, struct block_device *bdev, char __user *arg) { struct blk_user_trace_setup buts; @@ -401,7 +401,7 @@ err: return ret; } -static int blk_trace_startstop(request_queue_t *q, int start) +static int blk_trace_startstop(struct request_queue *q, int start) { struct blk_trace *bt; int ret; @@ -444,7 +444,7 @@ static int blk_trace_startstop(request_queue_t *q, int start) **/ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) { - request_queue_t *q; + struct request_queue *q; int ret, start = 0; q = bdev_get_queue(bdev); @@ -479,7 +479,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) * @q: the request queue associated with the device * **/ -void blk_trace_shutdown(request_queue_t *q) +void blk_trace_shutdown(struct request_queue *q) { if (q->blk_trace) { blk_trace_startstop(q, 0); diff --git a/block/bsg.c b/block/bsg.c index b571869928a8..3b2f05258a92 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -37,7 +37,7 @@ #define BSG_VERSION "0.4" struct bsg_device { - request_queue_t *queue; + struct request_queue *queue; spinlock_t lock; struct list_head busy_list; struct list_head done_list; @@ -180,7 +180,7 @@ unlock: return ret; } -static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq, +static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, struct sg_io_v4 *hdr, int has_write_perm) { memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */ @@ -214,7 +214,7 @@ static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq, * Check if sg_io_v4 from user is allowed and valid */ static int -bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw) +bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw) { int ret = 0; @@ -250,7 +250,7 @@ bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw) static struct request * bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr) { - request_queue_t *q = bd->queue; + struct request_queue *q = bd->queue; struct request *rq, *next_rq = NULL; int ret, rw; unsigned int dxfer_len; @@ -345,7 +345,7 @@ static void bsg_rq_end_io(struct request *rq, int uptodate) * do final setup of a 'bc' and submit the matching 'rq' to the block * layer for io */ -static void bsg_add_command(struct bsg_device *bd, request_queue_t *q, +static void bsg_add_command(struct bsg_device *bd, struct request_queue *q, struct bsg_command *bc, struct request *rq) { rq->sense = bc->sense; @@ -611,7 +611,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf, bc = NULL; ret = 0; while 
(nr_commands) { - request_queue_t *q = bd->queue; + struct request_queue *q = bd->queue; bc = bsg_alloc_command(bd); if (IS_ERR(bc)) { diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index d148ccbc36d1..54dc05439009 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -71,7 +71,7 @@ struct cfq_rb_root { * Per block device queue structure */ struct cfq_data { - request_queue_t *queue; + struct request_queue *queue; /* * rr list of queues with requests and the count of them @@ -197,7 +197,7 @@ CFQ_CFQQ_FNS(slice_new); CFQ_CFQQ_FNS(sync); #undef CFQ_CFQQ_FNS -static void cfq_dispatch_insert(request_queue_t *, struct request *); +static void cfq_dispatch_insert(struct request_queue *, struct request *); static struct cfq_queue *cfq_get_queue(struct cfq_data *, int, struct task_struct *, gfp_t); static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *, @@ -237,7 +237,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) kblockd_schedule_work(&cfqd->unplug_work); } -static int cfq_queue_empty(request_queue_t *q) +static int cfq_queue_empty(struct request_queue *q) { struct cfq_data *cfqd = q->elevator->elevator_data; @@ -623,7 +623,7 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) return NULL; } -static void cfq_activate_request(request_queue_t *q, struct request *rq) +static void cfq_activate_request(struct request_queue *q, struct request *rq) { struct cfq_data *cfqd = q->elevator->elevator_data; @@ -641,7 +641,7 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq) cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors; } -static void cfq_deactivate_request(request_queue_t *q, struct request *rq) +static void cfq_deactivate_request(struct request_queue *q, struct request *rq) { struct cfq_data *cfqd = q->elevator->elevator_data; @@ -665,7 +665,8 @@ static void cfq_remove_request(struct request *rq) } } -static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio) +static int cfq_merge(struct request_queue *q, struct request **req, + struct bio *bio) { struct cfq_data *cfqd = q->elevator->elevator_data; struct request *__rq; @@ -679,7 +680,7 @@ static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio) return ELEVATOR_NO_MERGE; } -static void cfq_merged_request(request_queue_t *q, struct request *req, +static void cfq_merged_request(struct request_queue *q, struct request *req, int type) { if (type == ELEVATOR_FRONT_MERGE) { @@ -690,7 +691,7 @@ static void cfq_merged_request(request_queue_t *q, struct request *req, } static void -cfq_merged_requests(request_queue_t *q, struct request *rq, +cfq_merged_requests(struct request_queue *q, struct request *rq, struct request *next) { /* @@ -703,7 +704,7 @@ cfq_merged_requests(request_queue_t *q, struct request *rq, cfq_remove_request(next); } -static int cfq_allow_merge(request_queue_t *q, struct request *rq, +static int cfq_allow_merge(struct request_queue *q, struct request *rq, struct bio *bio) { struct cfq_data *cfqd = q->elevator->elevator_data; @@ -913,7 +914,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) /* * Move request from internal lists to the request queue dispatch list. 
*/ -static void cfq_dispatch_insert(request_queue_t *q, struct request *rq) +static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) { struct cfq_data *cfqd = q->elevator->elevator_data; struct cfq_queue *cfqq = RQ_CFQQ(rq); @@ -1093,7 +1094,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd) return dispatched; } -static int cfq_dispatch_requests(request_queue_t *q, int force) +static int cfq_dispatch_requests(struct request_queue *q, int force) { struct cfq_data *cfqd = q->elevator->elevator_data; struct cfq_queue *cfqq; @@ -1214,7 +1215,7 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic) struct cfq_data *cfqd = cic->key; if (cfqd) { - request_queue_t *q = cfqd->queue; + struct request_queue *q = cfqd->queue; spin_lock_irq(q->queue_lock); __cfq_exit_single_io_context(cfqd, cic); @@ -1775,7 +1776,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, } } -static void cfq_insert_request(request_queue_t *q, struct request *rq) +static void cfq_insert_request(struct request_queue *q, struct request *rq) { struct cfq_data *cfqd = q->elevator->elevator_data; struct cfq_queue *cfqq = RQ_CFQQ(rq); @@ -1789,7 +1790,7 @@ static void cfq_insert_request(request_queue_t *q, struct request *rq) cfq_rq_enqueued(cfqd, cfqq, rq); } -static void cfq_completed_request(request_queue_t *q, struct request *rq) +static void cfq_completed_request(struct request_queue *q, struct request *rq) { struct cfq_queue *cfqq = RQ_CFQQ(rq); struct cfq_data *cfqd = cfqq->cfqd; @@ -1868,7 +1869,7 @@ static inline int __cfq_may_queue(struct cfq_queue *cfqq) return ELV_MQUEUE_MAY; } -static int cfq_may_queue(request_queue_t *q, int rw) +static int cfq_may_queue(struct request_queue *q, int rw) { struct cfq_data *cfqd = q->elevator->elevator_data; struct task_struct *tsk = current; @@ -1922,7 +1923,7 @@ static void cfq_put_request(struct request *rq) * Allocate cfq data structures associated with this request. */ static int -cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask) +cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) { struct cfq_data *cfqd = q->elevator->elevator_data; struct task_struct *tsk = current; @@ -1974,7 +1975,7 @@ static void cfq_kick_queue(struct work_struct *work) { struct cfq_data *cfqd = container_of(work, struct cfq_data, unplug_work); - request_queue_t *q = cfqd->queue; + struct request_queue *q = cfqd->queue; unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); @@ -2072,7 +2073,7 @@ static void cfq_put_async_queues(struct cfq_data *cfqd) static void cfq_exit_queue(elevator_t *e) { struct cfq_data *cfqd = e->elevator_data; - request_queue_t *q = cfqd->queue; + struct request_queue *q = cfqd->queue; cfq_shutdown_timer_wq(cfqd); @@ -2098,7 +2099,7 @@ static void cfq_exit_queue(elevator_t *e) kfree(cfqd); } -static void *cfq_init_queue(request_queue_t *q) +static void *cfq_init_queue(struct request_queue *q) { struct cfq_data *cfqd; diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index 87ca02ac84cb..1a511ffaf8a4 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c @@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq) /* * remove rq from rbtree and fifo. 
*/ -static void deadline_remove_request(request_queue_t *q, struct request *rq) +static void deadline_remove_request(struct request_queue *q, struct request *rq) { struct deadline_data *dd = q->elevator->elevator_data; @@ -115,7 +115,7 @@ static void deadline_remove_request(request_queue_t *q, struct request *rq) } static int -deadline_merge(request_queue_t *q, struct request **req, struct bio *bio) +deadline_merge(struct request_queue *q, struct request **req, struct bio *bio) { struct deadline_data *dd = q->elevator->elevator_data; struct request *__rq; @@ -144,8 +144,8 @@ out: return ret; } -static void deadline_merged_request(request_queue_t *q, struct request *req, - int type) +static void deadline_merged_request(struct request_queue *q, + struct request *req, int type) { struct deadline_data *dd = q->elevator->elevator_data; @@ -159,7 +159,7 @@ static void deadline_merged_request(request_queue_t *q, struct request *req, } static void -deadline_merged_requests(request_queue_t *q, struct request *req, +deadline_merged_requests(struct request_queue *q, struct request *req, struct request *next) { /* @@ -185,7 +185,7 @@ deadline_merged_requests(request_queue_t *q, struct request *req, static inline void deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq) { - request_queue_t *q = rq->q; + struct request_queue *q = rq->q; deadline_remove_request(q, rq); elv_dispatch_add_tail(q, rq); @@ -236,7 +236,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir) * deadline_dispatch_requests selects the best request according to * read/write expire, fifo_batch, etc */ -static int deadline_dispatch_requests(request_queue_t *q, int force) +static int deadline_dispatch_requests(struct request_queue *q, int force) { struct deadline_data *dd = q->elevator->elevator_data; const int reads = !list_empty(&dd->fifo_list[READ]); @@ -335,7 +335,7 @@ dispatch_request: return 1; } -static int deadline_queue_empty(request_queue_t *q) +static int deadline_queue_empty(struct request_queue *q) { struct deadline_data *dd = q->elevator->elevator_data; @@ -356,7 +356,7 @@ static void deadline_exit_queue(elevator_t *e) /* * initialize elevator private data (deadline_data). 
*/ -static void *deadline_init_queue(request_queue_t *q) +static void *deadline_init_queue(struct request_queue *q) { struct deadline_data *dd; diff --git a/block/elevator.c b/block/elevator.c index d265963d1ed3..c6d153de9fd6 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -56,7 +56,7 @@ static const int elv_hash_shift = 6; */ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) { - request_queue_t *q = rq->q; + struct request_queue *q = rq->q; elevator_t *e = q->elevator; if (e->ops->elevator_allow_merge_fn) @@ -141,12 +141,13 @@ static struct elevator_type *elevator_get(const char *name) return e; } -static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq) +static void *elevator_init_queue(struct request_queue *q, + struct elevator_queue *eq) { return eq->ops->elevator_init_fn(q); } -static void elevator_attach(request_queue_t *q, struct elevator_queue *eq, +static void elevator_attach(struct request_queue *q, struct elevator_queue *eq, void *data) { q->elevator = eq; @@ -172,7 +173,8 @@ __setup("elevator=", elevator_setup); static struct kobj_type elv_ktype; -static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e) +static elevator_t *elevator_alloc(struct request_queue *q, + struct elevator_type *e) { elevator_t *eq; int i; @@ -212,7 +214,7 @@ static void elevator_release(struct kobject *kobj) kfree(e); } -int elevator_init(request_queue_t *q, char *name) +int elevator_init(struct request_queue *q, char *name) { struct elevator_type *e = NULL; struct elevator_queue *eq; @@ -264,7 +266,7 @@ void elevator_exit(elevator_t *e) EXPORT_SYMBOL(elevator_exit); -static void elv_activate_rq(request_queue_t *q, struct request *rq) +static void elv_activate_rq(struct request_queue *q, struct request *rq) { elevator_t *e = q->elevator; @@ -272,7 +274,7 @@ static void elv_activate_rq(request_queue_t *q, struct request *rq) e->ops->elevator_activate_req_fn(q, rq); } -static void elv_deactivate_rq(request_queue_t *q, struct request *rq) +static void elv_deactivate_rq(struct request_queue *q, struct request *rq) { elevator_t *e = q->elevator; @@ -285,13 +287,13 @@ static inline void __elv_rqhash_del(struct request *rq) hlist_del_init(&rq->hash); } -static void elv_rqhash_del(request_queue_t *q, struct request *rq) +static void elv_rqhash_del(struct request_queue *q, struct request *rq) { if (ELV_ON_HASH(rq)) __elv_rqhash_del(rq); } -static void elv_rqhash_add(request_queue_t *q, struct request *rq) +static void elv_rqhash_add(struct request_queue *q, struct request *rq) { elevator_t *e = q->elevator; @@ -299,13 +301,13 @@ static void elv_rqhash_add(request_queue_t *q, struct request *rq) hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]); } -static void elv_rqhash_reposition(request_queue_t *q, struct request *rq) +static void elv_rqhash_reposition(struct request_queue *q, struct request *rq) { __elv_rqhash_del(rq); elv_rqhash_add(q, rq); } -static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset) +static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) { elevator_t *e = q->elevator; struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)]; @@ -391,7 +393,7 @@ EXPORT_SYMBOL(elv_rb_find); * entry. rq is sort insted into the dispatch queue. To be used by * specific elevators. 
*/ -void elv_dispatch_sort(request_queue_t *q, struct request *rq) +void elv_dispatch_sort(struct request_queue *q, struct request *rq) { sector_t boundary; struct list_head *entry; @@ -449,7 +451,7 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq) EXPORT_SYMBOL(elv_dispatch_add_tail); -int elv_merge(request_queue_t *q, struct request **req, struct bio *bio) +int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) { elevator_t *e = q->elevator; struct request *__rq; @@ -481,7 +483,7 @@ int elv_merge(request_queue_t *q, struct request **req, struct bio *bio) return ELEVATOR_NO_MERGE; } -void elv_merged_request(request_queue_t *q, struct request *rq, int type) +void elv_merged_request(struct request_queue *q, struct request *rq, int type) { elevator_t *e = q->elevator; @@ -494,7 +496,7 @@ void elv_merged_request(request_queue_t *q, struct request *rq, int type) q->last_merge = rq; } -void elv_merge_requests(request_queue_t *q, struct request *rq, +void elv_merge_requests(struct request_queue *q, struct request *rq, struct request *next) { elevator_t *e = q->elevator; @@ -509,7 +511,7 @@ void elv_merge_requests(request_queue_t *q, struct request *rq, q->last_merge = rq; } -void elv_requeue_request(request_queue_t *q, struct request *rq) +void elv_requeue_request(struct request_queue *q, struct request *rq) { /* * it already went through dequeue, we need to decrement the @@ -526,7 +528,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq) elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE); } -static void elv_drain_elevator(request_queue_t *q) +static void elv_drain_elevator(struct request_queue *q) { static int printed; while (q->elevator->ops->elevator_dispatch_fn(q, 1)) @@ -540,7 +542,7 @@ static void elv_drain_elevator(request_queue_t *q) } } -void elv_insert(request_queue_t *q, struct request *rq, int where) +void elv_insert(struct request_queue *q, struct request *rq, int where) { struct list_head *pos; unsigned ordseq; @@ -638,7 +640,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where) } } -void __elv_add_request(request_queue_t *q, struct request *rq, int where, +void __elv_add_request(struct request_queue *q, struct request *rq, int where, int plug) { if (q->ordcolor) @@ -676,7 +678,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where, EXPORT_SYMBOL(__elv_add_request); -void elv_add_request(request_queue_t *q, struct request *rq, int where, +void elv_add_request(struct request_queue *q, struct request *rq, int where, int plug) { unsigned long flags; @@ -688,7 +690,7 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where, EXPORT_SYMBOL(elv_add_request); -static inline struct request *__elv_next_request(request_queue_t *q) +static inline struct request *__elv_next_request(struct request_queue *q) { struct request *rq; @@ -704,7 +706,7 @@ static inline struct request *__elv_next_request(request_queue_t *q) } } -struct request *elv_next_request(request_queue_t *q) +struct request *elv_next_request(struct request_queue *q) { struct request *rq; int ret; @@ -770,7 +772,7 @@ struct request *elv_next_request(request_queue_t *q) EXPORT_SYMBOL(elv_next_request); -void elv_dequeue_request(request_queue_t *q, struct request *rq) +void elv_dequeue_request(struct request_queue *q, struct request *rq) { BUG_ON(list_empty(&rq->queuelist)); BUG_ON(ELV_ON_HASH(rq)); @@ -788,7 +790,7 @@ void elv_dequeue_request(request_queue_t *q, struct request *rq) EXPORT_SYMBOL(elv_dequeue_request); 
-int elv_queue_empty(request_queue_t *q) +int elv_queue_empty(struct request_queue *q) { elevator_t *e = q->elevator; @@ -803,7 +805,7 @@ int elv_queue_empty(request_queue_t *q) EXPORT_SYMBOL(elv_queue_empty); -struct request *elv_latter_request(request_queue_t *q, struct request *rq) +struct request *elv_latter_request(struct request_queue *q, struct request *rq) { elevator_t *e = q->elevator; @@ -812,7 +814,7 @@ struct request *elv_latter_request(request_queue_t *q, struct request *rq) return NULL; } -struct request *elv_former_request(request_queue_t *q, struct request *rq) +struct request *elv_former_request(struct request_queue *q, struct request *rq) { elevator_t *e = q->elevator; @@ -821,7 +823,7 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq) return NULL; } -int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask) +int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) { elevator_t *e = q->elevator; @@ -832,7 +834,7 @@ int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask) return 0; } -void elv_put_request(request_queue_t *q, struct request *rq) +void elv_put_request(struct request_queue *q, struct request *rq) { elevator_t *e = q->elevator; @@ -840,7 +842,7 @@ void elv_put_request(request_queue_t *q, struct request *rq) e->ops->elevator_put_req_fn(rq); } -int elv_may_queue(request_queue_t *q, int rw) +int elv_may_queue(struct request_queue *q, int rw) { elevator_t *e = q->elevator; @@ -850,7 +852,7 @@ int elv_may_queue(request_queue_t *q, int rw) return ELV_MQUEUE_MAY; } -void elv_completed_request(request_queue_t *q, struct request *rq) +void elv_completed_request(struct request_queue *q, struct request *rq) { elevator_t *e = q->elevator; @@ -1006,7 +1008,7 @@ EXPORT_SYMBOL_GPL(elv_unregister); * need for the new one. this way we have a chance of going back to the old * one, if the new one fails init for some reason. 
*/ -static int elevator_switch(request_queue_t *q, struct elevator_type *new_e) +static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) { elevator_t *old_elevator, *e; void *data; @@ -1078,7 +1080,8 @@ fail_register: return 0; } -ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count) +ssize_t elv_iosched_store(struct request_queue *q, const char *name, + size_t count) { char elevator_name[ELV_NAME_MAX]; size_t len; @@ -1107,7 +1110,7 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count) return count; } -ssize_t elv_iosched_show(request_queue_t *q, char *name) +ssize_t elv_iosched_show(struct request_queue *q, char *name) { elevator_t *e = q->elevator; struct elevator_type *elv = e->elevator_type; @@ -1127,7 +1130,8 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name) return len; } -struct request *elv_rb_former_request(request_queue_t *q, struct request *rq) +struct request *elv_rb_former_request(struct request_queue *q, + struct request *rq) { struct rb_node *rbprev = rb_prev(&rq->rb_node); @@ -1139,7 +1143,8 @@ struct request *elv_rb_former_request(request_queue_t *q, struct request *rq) EXPORT_SYMBOL(elv_rb_former_request); -struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq) +struct request *elv_rb_latter_request(struct request_queue *q, + struct request *rq) { struct rb_node *rbnext = rb_next(&rq->rb_node); diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 66056ca5e631..8c2caff87cc3 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -40,7 +40,7 @@ static void blk_unplug_work(struct work_struct *work); static void blk_unplug_timeout(unsigned long data); static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); static void init_request_from_bio(struct request *req, struct bio *bio); -static int __make_request(request_queue_t *q, struct bio *bio); +static int __make_request(struct request_queue *q, struct bio *bio); static struct io_context *current_io_context(gfp_t gfp_flags, int node); /* @@ -121,7 +121,7 @@ static void blk_queue_congestion_threshold(struct request_queue *q) struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) { struct backing_dev_info *ret = NULL; - request_queue_t *q = bdev_get_queue(bdev); + struct request_queue *q = bdev_get_queue(bdev); if (q) ret = &q->backing_dev_info; @@ -140,7 +140,7 @@ EXPORT_SYMBOL(blk_get_backing_dev_info); * cdb from the request data for instance. * */ -void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn) +void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) { q->prep_rq_fn = pfn; } @@ -163,14 +163,14 @@ EXPORT_SYMBOL(blk_queue_prep_rq); * no merge_bvec_fn is defined for a queue, and only the fixed limits are * honored. */ -void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn) +void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn) { q->merge_bvec_fn = mbfn; } EXPORT_SYMBOL(blk_queue_merge_bvec); -void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn) +void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) { q->softirq_done_fn = fn; } @@ -199,7 +199,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done); * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling * blk_queue_bounce() to create a buffer in normal memory. 
**/ -void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) +void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn) { /* * set defaults @@ -235,7 +235,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) EXPORT_SYMBOL(blk_queue_make_request); -static void rq_init(request_queue_t *q, struct request *rq) +static void rq_init(struct request_queue *q, struct request *rq) { INIT_LIST_HEAD(&rq->queuelist); INIT_LIST_HEAD(&rq->donelist); @@ -272,7 +272,7 @@ static void rq_init(request_queue_t *q, struct request *rq) * feature should call this function and indicate so. * **/ -int blk_queue_ordered(request_queue_t *q, unsigned ordered, +int blk_queue_ordered(struct request_queue *q, unsigned ordered, prepare_flush_fn *prepare_flush_fn) { if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && @@ -311,7 +311,7 @@ EXPORT_SYMBOL(blk_queue_ordered); * to the block layer by defining it through this call. * **/ -void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff) +void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff) { q->issue_flush_fn = iff; } @@ -321,7 +321,7 @@ EXPORT_SYMBOL(blk_queue_issue_flush_fn); /* * Cache flushing for ordered writes handling */ -inline unsigned blk_ordered_cur_seq(request_queue_t *q) +inline unsigned blk_ordered_cur_seq(struct request_queue *q) { if (!q->ordseq) return 0; @@ -330,7 +330,7 @@ inline unsigned blk_ordered_cur_seq(request_queue_t *q) unsigned blk_ordered_req_seq(struct request *rq) { - request_queue_t *q = rq->q; + struct request_queue *q = rq->q; BUG_ON(q->ordseq == 0); @@ -357,7 +357,7 @@ unsigned blk_ordered_req_seq(struct request *rq) return QUEUE_ORDSEQ_DONE; } -void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error) +void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) { struct request *rq; int uptodate; @@ -401,7 +401,7 @@ static void post_flush_end_io(struct request *rq, int error) blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error); } -static void queue_flush(request_queue_t *q, unsigned which) +static void queue_flush(struct request_queue *q, unsigned which) { struct request *rq; rq_end_io_fn *end_io; @@ -425,7 +425,7 @@ static void queue_flush(request_queue_t *q, unsigned which) elv_insert(q, rq, ELEVATOR_INSERT_FRONT); } -static inline struct request *start_ordered(request_queue_t *q, +static inline struct request *start_ordered(struct request_queue *q, struct request *rq) { q->bi_size = 0; @@ -476,7 +476,7 @@ static inline struct request *start_ordered(request_queue_t *q, return rq; } -int blk_do_ordered(request_queue_t *q, struct request **rqp) +int blk_do_ordered(struct request_queue *q, struct request **rqp) { struct request *rq = *rqp; int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); @@ -527,7 +527,7 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp) static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error) { - request_queue_t *q = bio->bi_private; + struct request_queue *q = bio->bi_private; /* * This is dry run, restore bio_sector and size. 
We'll finish @@ -551,7 +551,7 @@ static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error) static int ordered_bio_endio(struct request *rq, struct bio *bio, unsigned int nbytes, int error) { - request_queue_t *q = rq->q; + struct request_queue *q = rq->q; bio_end_io_t *endio; void *private; @@ -588,7 +588,7 @@ static int ordered_bio_endio(struct request *rq, struct bio *bio, * blk_queue_bounce_limit to have lower memory pages allocated as bounce * buffers for doing I/O to pages residing above @page. **/ -void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr) +void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) { unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT; int dma = 0; @@ -624,7 +624,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit); * Enables a low level driver to set an upper limit on the size of * received requests. **/ -void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors) +void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors) { if ((max_sectors << 9) < PAGE_CACHE_SIZE) { max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); @@ -651,7 +651,8 @@ EXPORT_SYMBOL(blk_queue_max_sectors); * physical data segments in a request. This would be the largest sized * scatter list the driver could handle. **/ -void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments) +void blk_queue_max_phys_segments(struct request_queue *q, + unsigned short max_segments) { if (!max_segments) { max_segments = 1; @@ -674,7 +675,8 @@ EXPORT_SYMBOL(blk_queue_max_phys_segments); * address/length pairs the host adapter can actually give as once * to the device. **/ -void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments) +void blk_queue_max_hw_segments(struct request_queue *q, + unsigned short max_segments) { if (!max_segments) { max_segments = 1; @@ -695,7 +697,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_segments); * Enables a low level driver to set an upper limit on the size of a * coalesced segment **/ -void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size) +void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) { if (max_size < PAGE_CACHE_SIZE) { max_size = PAGE_CACHE_SIZE; @@ -718,7 +720,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size); * even internal read-modify-write operations). Usually the default * of 512 covers most hardware. **/ -void blk_queue_hardsect_size(request_queue_t *q, unsigned short size) +void blk_queue_hardsect_size(struct request_queue *q, unsigned short size) { q->hardsect_size = size; } @@ -735,7 +737,7 @@ EXPORT_SYMBOL(blk_queue_hardsect_size); * @t: the stacking driver (top) * @b: the underlying device (bottom) **/ -void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b) +void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) { /* zero is "infinity" */ t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors); @@ -756,7 +758,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits); * @q: the request queue for the device * @mask: the memory boundary mask **/ -void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask) +void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) { if (mask < PAGE_CACHE_SIZE - 1) { mask = PAGE_CACHE_SIZE - 1; @@ -778,7 +780,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary); * this is used when buiding direct io requests for the queue. 
* **/ -void blk_queue_dma_alignment(request_queue_t *q, int mask) +void blk_queue_dma_alignment(struct request_queue *q, int mask) { q->dma_alignment = mask; } @@ -796,7 +798,7 @@ EXPORT_SYMBOL(blk_queue_dma_alignment); * * no locks need be held. **/ -struct request *blk_queue_find_tag(request_queue_t *q, int tag) +struct request *blk_queue_find_tag(struct request_queue *q, int tag) { return blk_map_queue_find_tag(q->queue_tags, tag); } @@ -840,7 +842,7 @@ static int __blk_free_tags(struct blk_queue_tag *bqt) * blk_cleanup_queue() will take care of calling this function, if tagging * has been used. So there's no need to call this directly. **/ -static void __blk_queue_free_tags(request_queue_t *q) +static void __blk_queue_free_tags(struct request_queue *q) { struct blk_queue_tag *bqt = q->queue_tags; @@ -877,7 +879,7 @@ EXPORT_SYMBOL(blk_free_tags); * This is used to disabled tagged queuing to a device, yet leave * queue in function. **/ -void blk_queue_free_tags(request_queue_t *q) +void blk_queue_free_tags(struct request_queue *q) { clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); } @@ -885,7 +887,7 @@ void blk_queue_free_tags(request_queue_t *q) EXPORT_SYMBOL(blk_queue_free_tags); static int -init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth) +init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth) { struct request **tag_index; unsigned long *tag_map; @@ -955,7 +957,7 @@ EXPORT_SYMBOL(blk_init_tags); * @depth: the maximum queue depth supported * @tags: the tag to use **/ -int blk_queue_init_tags(request_queue_t *q, int depth, +int blk_queue_init_tags(struct request_queue *q, int depth, struct blk_queue_tag *tags) { int rc; @@ -996,7 +998,7 @@ EXPORT_SYMBOL(blk_queue_init_tags); * Notes: * Must be called with the queue lock held. **/ -int blk_queue_resize_tags(request_queue_t *q, int new_depth) +int blk_queue_resize_tags(struct request_queue *q, int new_depth) { struct blk_queue_tag *bqt = q->queue_tags; struct request **tag_index; @@ -1059,7 +1061,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags); * Notes: * queue lock must be held. **/ -void blk_queue_end_tag(request_queue_t *q, struct request *rq) +void blk_queue_end_tag(struct request_queue *q, struct request *rq) { struct blk_queue_tag *bqt = q->queue_tags; int tag = rq->tag; @@ -1111,7 +1113,7 @@ EXPORT_SYMBOL(blk_queue_end_tag); * Notes: * queue lock must be held. **/ -int blk_queue_start_tag(request_queue_t *q, struct request *rq) +int blk_queue_start_tag(struct request_queue *q, struct request *rq) { struct blk_queue_tag *bqt = q->queue_tags; int tag; @@ -1158,7 +1160,7 @@ EXPORT_SYMBOL(blk_queue_start_tag); * Notes: * queue lock must be held. 
**/ -void blk_queue_invalidate_tags(request_queue_t *q) +void blk_queue_invalidate_tags(struct request_queue *q) { struct blk_queue_tag *bqt = q->queue_tags; struct list_head *tmp, *n; @@ -1205,7 +1207,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg) EXPORT_SYMBOL(blk_dump_rq_flags); -void blk_recount_segments(request_queue_t *q, struct bio *bio) +void blk_recount_segments(struct request_queue *q, struct bio *bio) { struct bio_vec *bv, *bvprv = NULL; int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster; @@ -1267,7 +1269,7 @@ new_hw_segment: } EXPORT_SYMBOL(blk_recount_segments); -static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio, +static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, struct bio *nxt) { if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER))) @@ -1288,7 +1290,7 @@ static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio, return 0; } -static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio, +static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio, struct bio *nxt) { if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) @@ -1308,7 +1310,8 @@ static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio, * map a request to scatterlist, return number of sg entries setup. Caller * must make sure sg can hold rq->nr_phys_segments entries */ -int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg) +int blk_rq_map_sg(struct request_queue *q, struct request *rq, + struct scatterlist *sg) { struct bio_vec *bvec, *bvprv; struct bio *bio; @@ -1361,7 +1364,7 @@ EXPORT_SYMBOL(blk_rq_map_sg); * specific ones if so desired */ -static inline int ll_new_mergeable(request_queue_t *q, +static inline int ll_new_mergeable(struct request_queue *q, struct request *req, struct bio *bio) { @@ -1382,7 +1385,7 @@ static inline int ll_new_mergeable(request_queue_t *q, return 1; } -static inline int ll_new_hw_segment(request_queue_t *q, +static inline int ll_new_hw_segment(struct request_queue *q, struct request *req, struct bio *bio) { @@ -1406,7 +1409,7 @@ static inline int ll_new_hw_segment(request_queue_t *q, return 1; } -int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio) +int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio) { unsigned short max_sectors; int len; @@ -1444,7 +1447,7 @@ int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio) } EXPORT_SYMBOL(ll_back_merge_fn); -static int ll_front_merge_fn(request_queue_t *q, struct request *req, +static int ll_front_merge_fn(struct request_queue *q, struct request *req, struct bio *bio) { unsigned short max_sectors; @@ -1483,7 +1486,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req, return ll_new_hw_segment(q, req, bio); } -static int ll_merge_requests_fn(request_queue_t *q, struct request *req, +static int ll_merge_requests_fn(struct request_queue *q, struct request *req, struct request *next) { int total_phys_segments; @@ -1539,7 +1542,7 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req, * This is called with interrupts off and no requests on the queue and * with the queue lock held. */ -void blk_plug_device(request_queue_t *q) +void blk_plug_device(struct request_queue *q) { WARN_ON(!irqs_disabled()); @@ -1562,7 +1565,7 @@ EXPORT_SYMBOL(blk_plug_device); * remove the queue from the plugged list, if present. called with * queue lock held and interrupts disabled. 
*/ -int blk_remove_plug(request_queue_t *q) +int blk_remove_plug(struct request_queue *q) { WARN_ON(!irqs_disabled()); @@ -1578,7 +1581,7 @@ EXPORT_SYMBOL(blk_remove_plug); /* * remove the plug and let it rip.. */ -void __generic_unplug_device(request_queue_t *q) +void __generic_unplug_device(struct request_queue *q) { if (unlikely(blk_queue_stopped(q))) return; @@ -1592,7 +1595,7 @@ EXPORT_SYMBOL(__generic_unplug_device); /** * generic_unplug_device - fire a request queue - * @q: The &request_queue_t in question + * @q: The &struct request_queue in question * * Description: * Linux uses plugging to build bigger requests queues before letting @@ -1601,7 +1604,7 @@ EXPORT_SYMBOL(__generic_unplug_device); * gets unplugged, the request_fn defined for the queue is invoked and * transfers started. **/ -void generic_unplug_device(request_queue_t *q) +void generic_unplug_device(struct request_queue *q) { spin_lock_irq(q->queue_lock); __generic_unplug_device(q); @@ -1612,7 +1615,7 @@ EXPORT_SYMBOL(generic_unplug_device); static void blk_backing_dev_unplug(struct backing_dev_info *bdi, struct page *page) { - request_queue_t *q = bdi->unplug_io_data; + struct request_queue *q = bdi->unplug_io_data; /* * devices don't necessarily have an ->unplug_fn defined @@ -1627,7 +1630,8 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi, static void blk_unplug_work(struct work_struct *work) { - request_queue_t *q = container_of(work, request_queue_t, unplug_work); + struct request_queue *q = + container_of(work, struct request_queue, unplug_work); blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, q->rq.count[READ] + q->rq.count[WRITE]); @@ -1637,7 +1641,7 @@ static void blk_unplug_work(struct work_struct *work) static void blk_unplug_timeout(unsigned long data) { - request_queue_t *q = (request_queue_t *)data; + struct request_queue *q = (struct request_queue *)data; blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL, q->rq.count[READ] + q->rq.count[WRITE]); @@ -1647,14 +1651,14 @@ static void blk_unplug_timeout(unsigned long data) /** * blk_start_queue - restart a previously stopped queue - * @q: The &request_queue_t in question + * @q: The &struct request_queue in question * * Description: * blk_start_queue() will clear the stop flag on the queue, and call * the request_fn for the queue if it was in a stopped state when * entered. Also see blk_stop_queue(). Queue lock must be held. **/ -void blk_start_queue(request_queue_t *q) +void blk_start_queue(struct request_queue *q) { WARN_ON(!irqs_disabled()); @@ -1677,7 +1681,7 @@ EXPORT_SYMBOL(blk_start_queue); /** * blk_stop_queue - stop a queue - * @q: The &request_queue_t in question + * @q: The &struct request_queue in question * * Description: * The Linux block layer assumes that a block driver will consume all @@ -1689,7 +1693,7 @@ EXPORT_SYMBOL(blk_start_queue); * the driver has signalled it's ready to go again. This happens by calling * blk_start_queue() to restart queue operations. Queue lock must be held. 
**/ -void blk_stop_queue(request_queue_t *q) +void blk_stop_queue(struct request_queue *q) { blk_remove_plug(q); set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); @@ -1746,7 +1750,7 @@ void blk_run_queue(struct request_queue *q) EXPORT_SYMBOL(blk_run_queue); /** - * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed + * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed * @kobj: the kobj belonging of the request queue to be released * * Description: @@ -1762,7 +1766,8 @@ EXPORT_SYMBOL(blk_run_queue); **/ static void blk_release_queue(struct kobject *kobj) { - request_queue_t *q = container_of(kobj, struct request_queue, kobj); + struct request_queue *q = + container_of(kobj, struct request_queue, kobj); struct request_list *rl = &q->rq; blk_sync_queue(q); @@ -1778,13 +1783,13 @@ static void blk_release_queue(struct kobject *kobj) kmem_cache_free(requestq_cachep, q); } -void blk_put_queue(request_queue_t *q) +void blk_put_queue(struct request_queue *q) { kobject_put(&q->kobj); } EXPORT_SYMBOL(blk_put_queue); -void blk_cleanup_queue(request_queue_t * q) +void blk_cleanup_queue(struct request_queue * q) { mutex_lock(&q->sysfs_lock); set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); @@ -1798,7 +1803,7 @@ void blk_cleanup_queue(request_queue_t * q) EXPORT_SYMBOL(blk_cleanup_queue); -static int blk_init_free_list(request_queue_t *q) +static int blk_init_free_list(struct request_queue *q) { struct request_list *rl = &q->rq; @@ -1817,7 +1822,7 @@ static int blk_init_free_list(request_queue_t *q) return 0; } -request_queue_t *blk_alloc_queue(gfp_t gfp_mask) +struct request_queue *blk_alloc_queue(gfp_t gfp_mask) { return blk_alloc_queue_node(gfp_mask, -1); } @@ -1825,9 +1830,9 @@ EXPORT_SYMBOL(blk_alloc_queue); static struct kobj_type queue_ktype; -request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) +struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) { - request_queue_t *q; + struct request_queue *q; q = kmem_cache_alloc_node(requestq_cachep, gfp_mask | __GFP_ZERO, node_id); @@ -1882,16 +1887,16 @@ EXPORT_SYMBOL(blk_alloc_queue_node); * when the block device is deactivated (such as at module unload). 
 **/
-request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
+struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
 {
 	return blk_init_queue_node(rfn, lock, -1);
 }
 EXPORT_SYMBOL(blk_init_queue);
 
-request_queue_t *
+struct request_queue *
 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
-	request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
 
 	if (!q)
 		return NULL;
@@ -1940,7 +1945,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
-int blk_get_queue(request_queue_t *q)
+int blk_get_queue(struct request_queue *q)
 {
 	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
 		kobject_get(&q->kobj);
@@ -1952,7 +1957,7 @@ int blk_get_queue(request_queue_t *q)
 
 EXPORT_SYMBOL(blk_get_queue);
 
-static inline void blk_free_request(request_queue_t *q, struct request *rq)
+static inline void blk_free_request(struct request_queue *q, struct request *rq)
 {
 	if (rq->cmd_flags & REQ_ELVPRIV)
 		elv_put_request(q, rq);
@@ -1960,7 +1965,7 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq)
 }
 
 static struct request *
-blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
+blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
 {
 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
@@ -1988,7 +1993,7 @@ blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
-static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
+static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
 {
 	if (!ioc)
 		return 0;
@@ -2009,7 +2014,7 @@ static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
-static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
+static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
 {
 	if (!ioc || ioc_batching(q, ioc))
 		return;
@@ -2018,7 +2023,7 @@ static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
 	ioc->last_waited = jiffies;
 }
 
-static void __freed_request(request_queue_t *q, int rw)
+static void __freed_request(struct request_queue *q, int rw)
 {
 	struct request_list *rl = &q->rq;
@@ -2037,7 +2042,7 @@ static void __freed_request(request_queue_t *q, int rw)
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.   Called under q->queue_lock.
 */
-static void freed_request(request_queue_t *q, int rw, int priv)
+static void freed_request(struct request_queue *q, int rw, int priv)
 {
 	struct request_list *rl = &q->rq;
@@ -2057,7 +2062,7 @@ static void freed_request(request_queue_t *q, int rw, int priv)
 * Returns NULL on failure, with queue_lock held.
 * Returns !NULL on success, with queue_lock *not held*.
 */
-static struct request *get_request(request_queue_t *q, int rw_flags,
+static struct request *get_request(struct request_queue *q, int rw_flags,
 				   struct bio *bio, gfp_t gfp_mask)
 {
 	struct request *rq = NULL;
@@ -2162,7 +2167,7 @@ out:
 *
 * Called with q->queue_lock held, and returns with it unlocked.
 */
-static struct request *get_request_wait(request_queue_t *q, int rw_flags,
+static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 					struct bio *bio)
 {
 	const int rw = rw_flags & 0x01;
@@ -2204,7 +2209,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw_flags,
 	return rq;
 }
 
-struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
+struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
 	struct request *rq;
@@ -2234,7 +2239,7 @@ EXPORT_SYMBOL(blk_get_request);
 *
 * The queue lock must be held with interrupts disabled.
 */
-void blk_start_queueing(request_queue_t *q)
+void blk_start_queueing(struct request_queue *q)
 {
 	if (!blk_queue_plugged(q))
 		q->request_fn(q);
@@ -2253,7 +2258,7 @@ EXPORT_SYMBOL(blk_start_queueing);
 * more, when that condition happens we need to put the request back
 * on the queue. Must be called with queue lock held.
 */
-void blk_requeue_request(request_queue_t *q, struct request *rq)
+void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
 	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
@@ -2284,7 +2289,7 @@ EXPORT_SYMBOL(blk_requeue_request);
 * of the queue for things like a QUEUE_FULL message from a device, or a
 * host that is unable to accept a particular command.
 */
-void blk_insert_request(request_queue_t *q, struct request *rq,
+void blk_insert_request(struct request_queue *q, struct request *rq,
 			int at_head, void *data)
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
@@ -2330,7 +2335,7 @@ static int __blk_rq_unmap_user(struct bio *bio)
 	return ret;
 }
 
-static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     void __user *ubuf, unsigned int len)
 {
 	unsigned long uaddr;
@@ -2403,8 +2408,8 @@ unmap_bio:
 * original bio must be passed back in to blk_rq_unmap_user() for proper
 * unmapping.
 */
-int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
-		    unsigned long len)
+int blk_rq_map_user(struct request_queue *q, struct request *rq,
+		    void __user *ubuf, unsigned long len)
 {
 	unsigned long bytes_read = 0;
 	struct bio *bio = NULL;
@@ -2470,7 +2475,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
 * original bio must be passed back in to blk_rq_unmap_user() for proper
 * unmapping.
 */
-int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 			struct sg_iovec *iov, int iov_count, unsigned int len)
 {
 	struct bio *bio;
@@ -2540,7 +2545,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 */
-int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		    unsigned int len, gfp_t gfp_mask)
 {
 	struct bio *bio;
@@ -2577,7 +2582,7 @@ EXPORT_SYMBOL(blk_rq_map_kern);
 * Insert a fully prepared request at the back of the io scheduler queue
 * for execution.  Don't wait for completion.
 */
-void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 			   struct request *rq, int at_head,
 			   rq_end_io_fn *done)
 {
@@ -2605,7 +2610,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 * Insert a fully prepared request at the back of the io scheduler queue
 * for execution and wait for completion.
 */
-int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
+int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 		   struct request *rq, int at_head)
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
@@ -2648,7 +2653,7 @@ EXPORT_SYMBOL(blk_execute_rq);
 */
 int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 {
-	request_queue_t *q;
+	struct request_queue *q;
 
 	if (bdev->bd_disk == NULL)
 		return -ENXIO;
@@ -2684,7 +2689,7 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 * queue lock is held and interrupts disabled, as we muck with the
 * request queue list.
 */
-static inline void add_request(request_queue_t * q, struct request * req)
+static inline void add_request(struct request_queue * q, struct request * req)
 {
 	drive_stat_acct(req, req->nr_sectors, 1);
@@ -2730,7 +2735,7 @@ EXPORT_SYMBOL_GPL(disk_round_stats);
 /*
 * queue lock must be held
 */
-void __blk_put_request(request_queue_t *q, struct request *req)
+void __blk_put_request(struct request_queue *q, struct request *req)
 {
 	if (unlikely(!q))
 		return;
@@ -2760,7 +2765,7 @@ EXPORT_SYMBOL_GPL(__blk_put_request);
 void blk_put_request(struct request *req)
 {
 	unsigned long flags;
-	request_queue_t *q = req->q;
+	struct request_queue *q = req->q;
 
 	/*
 	 * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
@@ -2798,7 +2803,7 @@ EXPORT_SYMBOL(blk_end_sync_rq);
 /*
 * Has to be called with the request spinlock acquired
 */
-static int attempt_merge(request_queue_t *q, struct request *req,
+static int attempt_merge(struct request_queue *q, struct request *req,
 			 struct request *next)
 {
 	if (!rq_mergeable(req) || !rq_mergeable(next))
@@ -2851,7 +2856,8 @@ static int attempt_merge(request_queue_t *q, struct request *req,
 	return 1;
 }
 
-static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
+static inline int attempt_back_merge(struct request_queue *q,
+				     struct request *rq)
 {
 	struct request *next = elv_latter_request(q, rq);
@@ -2861,7 +2867,8 @@ static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
 	return 0;
 }
 
-static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
+static inline int attempt_front_merge(struct request_queue *q,
+				      struct request *rq)
 {
 	struct request *prev = elv_former_request(q, rq);
@@ -2905,7 +2912,7 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
 	req->start_time = jiffies;
 }
 
-static int __make_request(request_queue_t *q, struct bio *bio)
+static int __make_request(struct request_queue *q, struct bio *bio)
 {
 	struct request *req;
 	int el_ret, nr_sectors, barrier, err;
@@ -3119,7 +3126,7 @@ static inline int should_fail_request(struct bio *bio)
 */
 static inline void __generic_make_request(struct bio *bio)
 {
-	request_queue_t *q;
+	struct request_queue *q;
 	sector_t maxsector;
 	sector_t old_sector;
 	int ret, nr_sectors = bio_sectors(bio);
@@ -3312,7 +3319,7 @@ static void blk_recalc_rq_segments(struct request *rq)
 	struct bio *bio, *prevbio = NULL;
 	int nr_phys_segs, nr_hw_segs;
 	unsigned int phys_size, hw_size;
-	request_queue_t *q = rq->q;
+	struct request_queue *q = rq->q;
 
 	if (!rq->bio)
 		return;
@@ -3658,7 +3665,8 @@ void end_request(struct request *req, int uptodate)
 EXPORT_SYMBOL(end_request);
 
-void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
+void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+		     struct bio *bio)
 {
 	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
 	rq->cmd_flags |= (bio->bi_rw & 3);
@@ -3701,7 +3709,7 @@ int __init blk_dev_init(void)
 			sizeof(struct request), 0, SLAB_PANIC, NULL);
 
 	requestq_cachep = kmem_cache_create("blkdev_queue",
-			sizeof(request_queue_t), 0, SLAB_PANIC, NULL);
+			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
 
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
@@ -4021,7 +4029,8 @@ static ssize_t
 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+	struct request_queue *q =
+		container_of(kobj, struct request_queue, kobj);
 	ssize_t res;
 
 	if (!entry->show)
@@ -4041,7 +4050,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 		 const char *page, size_t length)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+	struct request_queue *q = container_of(kobj, struct request_queue, kobj);
 	ssize_t res;
@@ -4072,7 +4081,7 @@ int blk_register_queue(struct gendisk *disk)
 {
 	int ret;
 
-	request_queue_t *q = disk->queue;
+	struct request_queue *q = disk->queue;
 
 	if (!q || !q->request_fn)
 		return -ENXIO;
@@ -4097,7 +4106,7 @@ int blk_register_queue(struct gendisk *disk)
 
 void blk_unregister_queue(struct gendisk *disk)
 {
-	request_queue_t *q = disk->queue;
+	struct request_queue *q = disk->queue;
 
 	if (q && q->request_fn) {
 		elv_unregister_queue(q);
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 1c3de2b9a6b5..7563d8aa3944 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -11,13 +11,13 @@ struct noop_data {
 	struct list_head queue;
 };
 
-static void noop_merged_requests(request_queue_t *q, struct request *rq,
+static void noop_merged_requests(struct request_queue *q, struct request *rq,
 				 struct request *next)
 {
 	list_del_init(&next->queuelist);
 }
 
-static int noop_dispatch(request_queue_t *q, int force)
+static int noop_dispatch(struct request_queue *q, int force)
 {
 	struct noop_data *nd = q->elevator->elevator_data;
@@ -31,14 +31,14 @@ static int noop_dispatch(request_queue_t *q, int force)
 	return 0;
 }
 
-static void noop_add_request(request_queue_t *q, struct request *rq)
+static void noop_add_request(struct request_queue *q, struct request *rq)
 {
 	struct noop_data *nd = q->elevator->elevator_data;
 
 	list_add_tail(&rq->queuelist, &nd->queue);
 }
 
-static int noop_queue_empty(request_queue_t *q)
+static int noop_queue_empty(struct request_queue *q)
 {
 	struct noop_data *nd = q->elevator->elevator_data;
@@ -46,7 +46,7 @@ static int noop_queue_empty(request_queue_t *q)
 }
 
 static struct request *
-noop_former_request(request_queue_t *q, struct request *rq)
+noop_former_request(struct request_queue *q, struct request *rq)
 {
 	struct noop_data *nd = q->elevator->elevator_data;
@@ -56,7 +56,7 @@ noop_former_request(request_queue_t *q, struct request *rq)
 }
 
 static struct request *
-noop_latter_request(request_queue_t *q, struct request *rq)
+noop_latter_request(struct request_queue *q, struct request *rq)
 {
 	struct noop_data *nd = q->elevator->elevator_data;
@@ -65,7 +65,7 @@ noop_latter_request(request_queue_t *q, struct request *rq)
 	return list_entry(rq->queuelist.next, struct request, queuelist);
 }
 
-static void *noop_init_queue(request_queue_t *q)
+static void *noop_init_queue(struct request_queue *q)
 {
 	struct noop_data *nd;
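[Editorial note, not part of the patch] The block-core hunks above are purely textual, but they fix the spelling every caller must use from this commit on. A minimal sketch of a consumer under the new spelling, using two interfaces whose converted signatures appear above (blk_get_request() and blk_execute_rq()); the mydrv_flush() helper is hypothetical, and the REQ_TYPE_FLUSH usage mirrors the ps3disk/sd hunks later in this patch:

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Illustrative only: queue one flush request and wait for it.
 * Note the parameter is spelled "struct request_queue *",
 * not "request_queue_t *". */
static int mydrv_flush(struct request_queue *q, struct gendisk *disk)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);	/* may sleep */
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_FLUSH;			/* as in ps3disk/sd */
	err = blk_execute_rq(q, disk, rq, 0);		/* tail insert, wait */
	blk_put_request(rq);
	return err;
}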
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index d359a715bbc8..91c73224f4c6 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -49,22 +49,22 @@ static int sg_get_version(int __user *p)
 	return put_user(sg_version_num, p);
 }
 
-static int scsi_get_idlun(request_queue_t *q, int __user *p)
+static int scsi_get_idlun(struct request_queue *q, int __user *p)
 {
 	return put_user(0, p);
 }
 
-static int scsi_get_bus(request_queue_t *q, int __user *p)
+static int scsi_get_bus(struct request_queue *q, int __user *p)
 {
 	return put_user(0, p);
 }
 
-static int sg_get_timeout(request_queue_t *q)
+static int sg_get_timeout(struct request_queue *q)
 {
 	return q->sg_timeout / (HZ / USER_HZ);
 }
 
-static int sg_set_timeout(request_queue_t *q, int __user *p)
+static int sg_set_timeout(struct request_queue *q, int __user *p)
 {
 	int timeout, err = get_user(timeout, p);
@@ -74,14 +74,14 @@ static int sg_set_timeout(request_queue_t *q, int __user *p)
 	return err;
 }
 
-static int sg_get_reserved_size(request_queue_t *q, int __user *p)
+static int sg_get_reserved_size(struct request_queue *q, int __user *p)
 {
 	unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
 
 	return put_user(val, p);
 }
 
-static int sg_set_reserved_size(request_queue_t *q, int __user *p)
+static int sg_set_reserved_size(struct request_queue *q, int __user *p)
 {
 	int size, err = get_user(size, p);
@@ -101,7 +101,7 @@ static int sg_set_reserved_size(request_queue_t *q, int __user *p)
 * will always return that we are ATAPI even for a real SCSI drive, I'm not
 * so sure this is worth doing anything about (why would you care??)
 */
-static int sg_emulated_host(request_queue_t *q, int __user *p)
+static int sg_emulated_host(struct request_queue *q, int __user *p)
 {
 	return put_user(1, p);
 }
@@ -214,7 +214,7 @@ int blk_verify_command(unsigned char *cmd, int has_write_perm)
 }
 EXPORT_SYMBOL_GPL(blk_verify_command);
 
-static int blk_fill_sghdr_rq(request_queue_t *q, struct request *rq,
+static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
 			     struct sg_io_hdr *hdr, int has_write_perm)
 {
 	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
@@ -286,7 +286,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
 	return r;
 }
 
-static int sg_io(struct file *file, request_queue_t *q,
+static int sg_io(struct file *file, struct request_queue *q,
 		 struct gendisk *bd_disk, struct sg_io_hdr *hdr)
 {
 	unsigned long start_time;
@@ -519,7 +519,8 @@ error:
 EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
 
 /* Send basic block requests */
-static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int cmd, int data)
+static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
+			      int cmd, int data)
 {
 	struct request *rq;
 	int err;
@@ -539,7 +540,8 @@ static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int c
 	return err;
 }
 
-static inline int blk_send_start_stop(request_queue_t *q, struct gendisk *bd_disk, int data)
+static inline int blk_send_start_stop(struct request_queue *q,
+				      struct gendisk *bd_disk, int data)
 {
 	return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
 }
diff --git a/drivers/acorn/block/fd1772.c b/drivers/acorn/block/fd1772.c
index 423ed08fb6f7..d7e18ce8dad9 100644
--- a/drivers/acorn/block/fd1772.c
+++ b/drivers/acorn/block/fd1772.c
@@ -372,7 +372,7 @@ static int fd_test_drive_present(int drive);
 static void config_types(void);
 static int floppy_open(struct inode *inode, struct file *filp);
 static int floppy_release(struct inode *inode, struct file *filp);
-static void do_fd_request(request_queue_t *);
+static void do_fd_request(struct request_queue *);
 
 /************************* End of Prototypes **************************/
 
@@ -1271,7 +1271,7 @@ static void fd1772_checkint(void)
 	}
 }
 
-static void do_fd_request(request_queue_t* q)
+static void do_fd_request(struct request_queue* q)
 {
 	unsigned long flags;
diff --git a/drivers/acorn/block/mfmhd.c b/drivers/acorn/block/mfmhd.c
index d85520f78e68..74058db674db 100644
--- a/drivers/acorn/block/mfmhd.c
+++ b/drivers/acorn/block/mfmhd.c
@@ -924,7 +924,7 @@ static void mfm_request(void)
 	DBG("mfm_request: Dropping out bottom\n");
 }
 
-static void do_mfm_request(request_queue_t *q)
+static void do_mfm_request(struct request_queue *q)
 {
 	DBG("do_mfm_request: about to mfm_request\n");
 	mfm_request();
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 12ac0b511f79..e83647651b31 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -768,7 +768,7 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
 	 * Decrement max hw segments accordingly.
 	 */
 	if (dev->class == ATA_DEV_ATAPI) {
-		request_queue_t *q = sdev->request_queue;
+		struct request_queue *q = sdev->request_queue;
 		blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
 	}
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 6ce8b897e262..c9751b2b57e6 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1422,7 +1422,7 @@ static void redo_fd_request(void)
 	goto repeat;
 }
 
-static void do_fd_request(request_queue_t * q)
+static void do_fd_request(struct request_queue * q)
 {
 	redo_fd_request();
 }
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 1d8466817943..ba07f762c4cb 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -138,7 +138,7 @@ struct aoedev {
 	u16 maxbcnt;
 	struct work_struct work;/* disk create work struct */
 	struct gendisk *gd;
-	request_queue_t blkq;
+	struct request_queue blkq;
 	struct hd_geometry geo;
 	sector_t ssize;
 	struct timer_list timer;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 4f598270fa31..007faaf008e7 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -125,7 +125,7 @@ aoeblk_release(struct inode *inode, struct file *filp)
 }
 
 static int
-aoeblk_make_request(request_queue_t *q, struct bio *bio)
+aoeblk_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct aoedev *d;
 	struct buf *buf;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 14d6b9492750..94268c75d04f 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1466,7 +1466,7 @@ repeat:
 }
 
 
-void do_fd_request(request_queue_t * q)
+void do_fd_request(struct request_queue * q)
 {
 	unsigned long flags;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index a2d6612b80d2..1be82d544dc3 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -139,7 +139,7 @@ static struct board_type products[] = {
 
 static ctlr_info_t *hba[MAX_CTLR];
 
-static void do_cciss_request(request_queue_t *q);
+static void do_cciss_request(struct request_queue *q);
 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
 static int cciss_open(struct inode *inode, struct file *filep);
 static int cciss_release(struct inode *inode, struct file *filep);
@@ -1584,7 +1584,7 @@ static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
 	 */
 	if (h->gendisk[0] != disk) {
 		if (disk) {
-			request_queue_t *q = disk->queue;
+			struct request_queue *q = disk->queue;
 			if (disk->flags & GENHD_FL_UP)
 				del_gendisk(disk);
 			if (q) {
@@ -2511,7 +2511,7 @@ after_error_processing:
 
 /*
 * Get a request and submit it to the controller.
 */
-static void do_cciss_request(request_queue_t *q)
+static void do_cciss_request(struct request_queue *q)
 {
 	ctlr_info_t *h = q->queuedata;
 	CommandList_struct *c;
@@ -3380,7 +3380,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 	do {
 		drive_info_struct *drv = &(hba[i]->drv[j]);
 		struct gendisk *disk = hba[i]->gendisk[j];
-		request_queue_t *q;
+		struct request_queue *q;
 
 		/* Check if the disk was allocated already */
 		if (!disk){
@@ -3523,7 +3523,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
 	for (j = 0; j < CISS_MAX_LUN; j++) {
 		struct gendisk *disk = hba[i]->gendisk[j];
 		if (disk) {
-			request_queue_t *q = disk->queue;
+			struct request_queue *q = disk->queue;
 
 			if (disk->flags & GENHD_FL_UP)
 				del_gendisk(disk);
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index b94cd1c32131..be4e3477d83b 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -161,7 +161,7 @@ static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
 
-static void do_ida_request(request_queue_t *q);
+static void do_ida_request(struct request_queue *q);
 static void start_io(ctlr_info_t *h);
 
 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
@@ -391,7 +391,7 @@ static void __devexit cpqarray_remove_one_eisa (int i)
 /* pdev is NULL for eisa */
 static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
 {
-	request_queue_t *q;
+	struct request_queue *q;
 	int j;
 
 	/*
@@ -886,7 +886,7 @@ static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
 * are in here (either via the dummy do_ida_request functions or by being
 * called from the interrupt handler
 */
-static void do_ida_request(request_queue_t *q)
+static void do_ida_request(struct request_queue *q)
 {
 	ctlr_info_t *h = q->queuedata;
 	cmdlist_t *c;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index fe088045dd08..085b7794fb3e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -251,7 +251,7 @@ static int irqdma_allocated;
 
 static struct request *current_req;
 static struct request_queue *floppy_queue;
-static void do_fd_request(request_queue_t * q);
+static void do_fd_request(struct request_queue * q);
 
 #ifndef fd_get_dma_residue
 #define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
@@ -2981,7 +2981,7 @@ static void process_fd_request(void)
 	schedule_bh(redo_fd_request);
 }
 
-static void do_fd_request(request_queue_t * q)
+static void do_fd_request(struct request_queue * q)
 {
 	if (max_buffer_sectors == 0) {
 		printk("VFS: do_fd_request called on non-open device\n");
diff --git a/drivers/block/lguest_blk.c b/drivers/block/lguest_blk.c
index 1634c2dd25ec..5b79d0724171 100644
--- a/drivers/block/lguest_blk.c
+++ b/drivers/block/lguest_blk.c
@@ -137,7 +137,7 @@ static void do_read(struct blockdev *bd, struct request *req)
 	lguest_send_dma(bd->phys_addr, &ping);
 }
 
-static void do_lgb_request(request_queue_t *q)
+static void do_lgb_request(struct request_queue *q)
 {
 	struct blockdev *bd;
 	struct request *req;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index e425daa1eac3..9f015fce4135 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -529,7 +529,7 @@ static struct bio *loop_get_bio(struct loop_device *lo)
 	return bio;
 }
 
-static int loop_make_request(request_queue_t *q, struct bio *old_bio)
+static int loop_make_request(struct request_queue *q, struct bio *old_bio)
 {
 	struct loop_device *lo = q->queuedata;
 	int rw = bio_rw(old_bio);
@@ -558,7 +558,7 @@ out:
 /*
 * kick off io on the underlying address space
 */
-static void loop_unplug(request_queue_t *q)
+static void loop_unplug(struct request_queue *q)
 {
 	struct loop_device *lo = q->queuedata;
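[Editorial note, not part of the patch] Several drivers in this commit (aoeblk and loop above, pktcdvd, rd, umem and the s390 drivers below) are bio-based rather than request-based, so the rename lands in their make_request functions. A hedged sketch of that pattern under the new spelling; struct mydev and mydev_xfer() are hypothetical stand-ins, and the bio_endio()/bio_io_error() calling convention (bytes_done plus error) is the pre-2.6.24 one visible in the md.c and pktcdvd.c hunks:

#include <linux/bio.h>
#include <linux/blkdev.h>

struct mydev { int dummy; };		/* hypothetical driver state */

static int mydev_xfer(struct mydev *dev, struct bio *bio)
{
	return 0;			/* pretend the transfer succeeds */
}

/* Sketch only: bio-based entry point, registered at init time with
 * blk_queue_make_request(q, mydev_make_request). */
static int mydev_make_request(struct request_queue *q, struct bio *bio)
{
	struct mydev *dev = q->queuedata;

	if (mydev_xfer(dev, bio) < 0) {
		bio_io_error(bio, bio->bi_size);	/* fail the whole bio */
		return 0;
	}
	bio_endio(bio, bio->bi_size, 0);		/* complete it */
	return 0;					/* 0 = bio consumed */
}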
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index c12951024090..be92c658f06e 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -100,7 +100,7 @@ static const char *nbdcmd_to_ascii(int cmd)
 static void nbd_end_request(struct request *req)
 {
 	int uptodate = (req->errors == 0) ? 1 : 0;
-	request_queue_t *q = req->q;
+	struct request_queue *q = req->q;
 	unsigned long flags;
 
 	dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
@@ -410,7 +410,7 @@ static void nbd_clear_que(struct nbd_device *lo)
 *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
 */
 
-static void do_nbd_request(request_queue_t * q)
+static void do_nbd_request(struct request_queue * q)
 {
 	struct request *req;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 1eeb8f2cde71..b8a994a2b013 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -183,7 +183,7 @@ static int pcd_packet(struct cdrom_device_info *cdi,
 static int pcd_detect(void);
 static void pcd_probe_capabilities(void);
 static void do_pcd_read_drq(void);
-static void do_pcd_request(request_queue_t * q);
+static void do_pcd_request(struct request_queue * q);
 static void do_pcd_read(void);
 
 struct pcd_unit {
@@ -713,7 +713,7 @@ static int pcd_detect(void)
 /* I/O request processing */
 static struct request_queue *pcd_queue;
 
-static void do_pcd_request(request_queue_t * q)
+static void do_pcd_request(struct request_queue * q)
 {
 	if (pcd_busy)
 		return;
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 31e01488eb51..df819f8a95a6 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -698,7 +698,7 @@ static enum action pd_identify(struct pd_unit *disk)
 
 /* end of io request engine */
 
-static void do_pd_request(request_queue_t * q)
+static void do_pd_request(struct request_queue * q)
 {
 	if (pd_req)
 		return;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 5826508f6731..ceffa6034e20 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -202,7 +202,7 @@ module_param_array(drive3, int, NULL, 0);
 #define ATAPI_WRITE_10	0x2a
 
 static int pf_open(struct inode *inode, struct file *file);
-static void do_pf_request(request_queue_t * q);
+static void do_pf_request(struct request_queue * q);
 static int pf_ioctl(struct inode *inode, struct file *file,
 		    unsigned int cmd, unsigned long arg);
 static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -760,7 +760,7 @@ static void pf_end_request(int uptodate)
 	}
 }
 
-static void do_pf_request(request_queue_t * q)
+static void do_pf_request(struct request_queue * q)
 {
 	if (pf_busy)
 		return;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 31be33e4f119..fadbfd880bab 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -752,7 +752,7 @@ static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio
 */
 static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
 {
-	request_queue_t *q = bdev_get_queue(pd->bdev);
+	struct request_queue *q = bdev_get_queue(pd->bdev);
 	struct request *rq;
 	int ret = 0;
@@ -979,7 +979,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
-static int pkt_set_segment_merging(struct pktcdvd_device *pd, request_queue_t *q)
+static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
 {
 	if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
 		/*
@@ -2314,7 +2314,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
 {
 	int ret;
 	long lba;
-	request_queue_t *q;
+	struct request_queue *q;
 
 	/*
 	 * We need to re-open the cdrom device without O_NONBLOCK to be able
@@ -2477,7 +2477,7 @@ static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int
 	return 0;
 }
 
-static int pkt_make_request(request_queue_t *q, struct bio *bio)
+static int pkt_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct pktcdvd_device *pd;
 	char b[BDEVNAME_SIZE];
@@ -2626,7 +2626,7 @@ end_io:
 
 
 
-static int pkt_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *bvec)
+static int pkt_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *bvec)
 {
 	struct pktcdvd_device *pd = q->queuedata;
 	sector_t zone = ZONE(bio->bi_sector, pd);
@@ -2647,7 +2647,7 @@ static int pkt_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *b
 
 static void pkt_init_queue(struct pktcdvd_device *pd)
 {
-	request_queue_t *q = pd->disk->queue;
+	struct request_queue *q = pd->disk->queue;
 
 	blk_queue_make_request(q, pkt_make_request);
 	blk_queue_hardsect_size(q, CD_FRAMESIZE);
diff --git a/drivers/block/ps2esdi.c b/drivers/block/ps2esdi.c
index 688a4fb0dc99..3c796e236253 100644
--- a/drivers/block/ps2esdi.c
+++ b/drivers/block/ps2esdi.c
@@ -64,7 +64,7 @@ static void reset_ctrl(void);
 
 static int ps2esdi_geninit(void);
 
-static void do_ps2esdi_request(request_queue_t * q);
+static void do_ps2esdi_request(struct request_queue * q);
 
 static void ps2esdi_readwrite(int cmd, struct request *req);
 
@@ -473,7 +473,7 @@ static void __init ps2esdi_get_device_cfg(void)
 }
 
 /* strategy routine that handles most of the IO requests */
-static void do_ps2esdi_request(request_queue_t * q)
+static void do_ps2esdi_request(struct request_queue * q)
 {
 	struct request *req;
 	/* since, this routine is called with interrupts cleared - they
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 170fb33dba97..aa8b890c80d7 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -190,7 +190,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
 }
 
 static void ps3disk_do_request(struct ps3_storage_device *dev,
-			       request_queue_t *q)
+			       struct request_queue *q)
 {
 	struct request *req;
 
@@ -211,7 +211,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
 	}
 }
 
-static void ps3disk_request(request_queue_t *q)
+static void ps3disk_request(struct request_queue *q)
 {
 	struct ps3_storage_device *dev = q->queuedata;
 	struct ps3disk_private *priv = dev->sbd.core.driver_data;
@@ -404,7 +404,7 @@ static int ps3disk_identify(struct ps3_storage_device *dev)
 	return 0;
 }
 
-static void ps3disk_prepare_flush(request_queue_t *q, struct request *req)
+static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
 {
 	struct ps3_storage_device *dev = q->queuedata;
 
@@ -414,7 +414,7 @@ static void ps3disk_prepare_flush(request_queue_t *q, struct request *req)
 	req->cmd_type = REQ_TYPE_FLUSH;
 }
 
-static int ps3disk_issue_flush(request_queue_t *q, struct gendisk *gendisk,
+static int ps3disk_issue_flush(struct request_queue *q, struct gendisk *gendisk,
 			       sector_t *sector)
 {
 	struct ps3_storage_device *dev = q->queuedata;
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index a1512da32410..65150b548f3a 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -264,7 +264,7 @@ static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec, sector_t sector,
 *  19-JAN-1998  Richard Gooch <rgooch@atnf.csiro.au>  Added devfs support
 *
 */
-static int rd_make_request(request_queue_t *q, struct bio *bio)
+static int rd_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct block_device *bdev = bio->bi_bdev;
 	struct address_space * mapping = bdev->bd_inode->i_mapping;
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index d50b82381155..4dff49256ac2 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -444,7 +444,7 @@ out:
 	return err;
 }
 
-static void do_vdc_request(request_queue_t *q)
+static void do_vdc_request(struct request_queue *q)
 {
 	while (1) {
 		struct request *req = elv_next_request(q);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 1a65979f1f0f..b4e462f154ea 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -225,7 +225,7 @@ static unsigned short write_postamble[] = {
 static void swim3_select(struct floppy_state *fs, int sel);
 static void swim3_action(struct floppy_state *fs, int action);
 static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(request_queue_t * q);
+static void do_fd_request(struct request_queue * q);
 static void start_request(struct floppy_state *fs);
 static void set_timeout(struct floppy_state *fs, int nticks,
 			void (*proc)(unsigned long));
@@ -290,7 +290,7 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
 	return (stat & DATA) == 0;
 }
 
-static void do_fd_request(request_queue_t * q)
+static void do_fd_request(struct request_queue * q)
 {
 	int i;
 
 	for(i=0;i<floppy_count;i++)
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 949ae93499e5..402209fec59a 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -278,7 +278,7 @@ struct carm_host {
 	unsigned int			state;
 	u32				fw_ver;
 
-	request_queue_t			*oob_q;
+	struct request_queue		*oob_q;
 	unsigned int			n_oob;
 
 	unsigned int			hw_sg_used;
@@ -287,7 +287,7 @@ struct carm_host {
 
 	unsigned int			wait_q_prod;
 	unsigned int			wait_q_cons;
-	request_queue_t			*wait_q[CARM_MAX_WAIT_Q];
+	struct request_queue		*wait_q[CARM_MAX_WAIT_Q];
 
 	unsigned int			n_msgs;
 	u64				msg_alloc;
@@ -756,7 +756,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
 	assert(rc == 0);
 }
 
-static inline void carm_push_q (struct carm_host *host, request_queue_t *q)
+static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
 {
 	unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;
 
@@ -768,7 +768,7 @@ static inline void carm_push_q (struct carm_host *host, request_queue_t *q)
 	BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */
 }
 
-static inline request_queue_t *carm_pop_q(struct carm_host *host)
+static inline struct request_queue *carm_pop_q(struct carm_host *host)
 {
 	unsigned int idx;
 
@@ -783,7 +783,7 @@ static inline request_queue_t *carm_pop_q(struct carm_host *host)
 
 static inline void carm_round_robin(struct carm_host *host)
 {
-	request_queue_t *q = carm_pop_q(host);
+	struct request_queue *q = carm_pop_q(host);
 	if (q) {
 		blk_start_queue(q);
 		VPRINTK("STARTED QUEUE %p\n", q);
@@ -802,7 +802,7 @@ static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
 	}
 }
 
-static void carm_oob_rq_fn(request_queue_t *q)
+static void carm_oob_rq_fn(struct request_queue *q)
 {
 	struct carm_host *host = q->queuedata;
 	struct carm_request *crq;
@@ -833,7 +833,7 @@ static void carm_oob_rq_fn(request_queue_t *q)
 	}
 }
 
-static void carm_rq_fn(request_queue_t *q)
+static void carm_rq_fn(struct request_queue *q)
 {
 	struct carm_port *port = q->queuedata;
 	struct carm_host *host = port->host;
@@ -1494,7 +1494,7 @@ static int carm_init_disks(struct carm_host *host)
 
 	for (i = 0; i < CARM_MAX_PORTS; i++) {
 		struct gendisk *disk;
-		request_queue_t *q;
+		struct request_queue *q;
 		struct carm_port *port;
 
 		port = &host->port[i];
@@ -1538,7 +1538,7 @@ static void carm_free_disks(struct carm_host *host)
 	for (i = 0; i < CARM_MAX_PORTS; i++) {
 		struct gendisk *disk = host->port[i].disk;
 		if (disk) {
-			request_queue_t *q = disk->queue;
+			struct request_queue *q = disk->queue;
 
 			if (disk->flags & GENHD_FL_UP)
 				del_gendisk(disk);
@@ -1571,7 +1571,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct carm_host *host;
 	unsigned int pci_dac;
 	int rc;
-	request_queue_t *q;
+	struct request_queue *q;
 	unsigned int i;
 
 	if (!printed_version++)
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 8b13d7d2cb63..c57dd2b3a0c8 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -503,7 +503,7 @@ static void ub_cleanup(struct ub_dev *sc)
 {
 	struct list_head *p;
 	struct ub_lun *lun;
-	request_queue_t *q;
+	struct request_queue *q;
 
 	while (!list_empty(&sc->luns)) {
 		p = sc->luns.next;
@@ -619,7 +619,7 @@ static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
 * The request function is our main entry point
 */
 
-static void ub_request_fn(request_queue_t *q)
+static void ub_request_fn(struct request_queue *q)
 {
 	struct ub_lun *lun = q->queuedata;
 	struct request *rq;
@@ -2273,7 +2273,7 @@ err_core:
 static int ub_probe_lun(struct ub_dev *sc, int lnum)
 {
 	struct ub_lun *lun;
-	request_queue_t *q;
+	struct request_queue *q;
 	struct gendisk *disk;
 	int rc;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index dec74bd23496..6b7c02d6360d 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -114,7 +114,7 @@ struct cardinfo {
 	 */
 	struct bio	*bio, *currentbio, **biotail;
 
-	request_queue_t *queue;
+	struct request_queue *queue;
 
 	struct mm_page {
 		dma_addr_t		page_dma;
@@ -357,7 +357,7 @@ static inline void reset_page(struct mm_page *page)
 	page->biotail = & page->bio;
 }
 
-static void mm_unplug_device(request_queue_t *q)
+static void mm_unplug_device(struct request_queue *q)
 {
 	struct cardinfo *card = q->queuedata;
 	unsigned long flags;
@@ -541,7 +541,7 @@ static void process_page(unsigned long data)
 --                              mm_make_request
 -----------------------------------------------------------------------------------
 */
-static int mm_make_request(request_queue_t *q, struct bio *bio)
+static int mm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct cardinfo *card = q->queuedata;
 	pr_debug("mm_make_request %llu %u\n",
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index dae39911a11d..85916e2665d4 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -400,7 +400,7 @@ error_ret:
 /*
 * This is the external request processing routine
 */
-static void do_viodasd_request(request_queue_t *q)
+static void do_viodasd_request(struct request_queue *q)
 {
 	struct request *req;
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 0d97b7eb818a..624d30f7da3f 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -298,7 +298,7 @@ static u_char __init xd_detect (u_char *controller, unsigned int *address)
 }
 
 /* do_xd_request: handle an incoming request */
-static void do_xd_request (request_queue_t * q)
+static void do_xd_request (struct request_queue * q)
 {
 	struct request *req;
diff --git a/drivers/block/xd.h b/drivers/block/xd.h
index 82e090fea957..cffd44a20383 100644
--- a/drivers/block/xd.h
+++ b/drivers/block/xd.h
@@ -104,7 +104,7 @@ static int xd_manual_geo_init (char *command);
 static u_char xd_detect (u_char *controller, unsigned int *address);
 static u_char xd_initdrives (void (*init_drive)(u_char drive));
 
-static void do_xd_request (request_queue_t * q);
+static void do_xd_request (struct request_queue * q);
 static int xd_ioctl (struct inode *inode,struct file *file,unsigned int cmd,unsigned long arg);
 static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count);
 static void xd_recalibrate (u_char drive);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6746c29181f8..964e51634f2d 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -241,7 +241,7 @@ static inline void flush_requests(struct blkfront_info *info)
 * do_blkif_request
 *  read a block; request is in a request queue
 */
-static void do_blkif_request(request_queue_t *rq)
+static void do_blkif_request(struct request_queue *rq)
 {
 	struct blkfront_info *info = NULL;
 	struct request *req;
@@ -287,7 +287,7 @@ wait:
 
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 {
-	request_queue_t *rq;
+	struct request_queue *rq;
 
 	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
 	if (rq == NULL)
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 732ec63b6e9c..cb27e8863d7c 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -458,7 +458,7 @@ static inline void ace_fsm_yieldirq(struct ace_device *ace)
 }
 
 /* Get the next read/write request; ending requests that we don't handle */
-struct request *ace_get_next_request(request_queue_t * q)
+struct request *ace_get_next_request(struct request_queue * q)
 {
 	struct request *req;
 
@@ -825,7 +825,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
 /* ---------------------------------------------------------------------
 * Block ops
 */
-static void ace_request(request_queue_t * q)
+static void ace_request(struct request_queue * q)
 {
 	struct request *req;
 	struct ace_device *ace;
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index e40fa98842e5..2d5853cbd4b0 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -67,7 +67,7 @@ static DEFINE_SPINLOCK(z2ram_lock);
 static struct block_device_operations z2_fops;
 static struct gendisk *z2ram_gendisk;
 
-static void do_z2_request(request_queue_t *q)
+static void do_z2_request(struct request_queue *q)
 {
 	struct request *req;
 	while ((req = elv_next_request(q)) != NULL) {
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 499019bf8f40..67ee3d4b2878 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2094,7 +2094,7 @@ out:
 static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 			       int lba, int nframes)
 {
-	request_queue_t *q = cdi->disk->queue;
+	struct request_queue *q = cdi->disk->queue;
 	struct request *rq;
 	struct bio *bio;
 	unsigned int len;
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 44cd7b2ddf09..e51550db1575 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -398,7 +398,7 @@ static void viocd_end_request(struct request *req, int uptodate)
 
 static int rwreq;
 
-static void do_viocd_request(request_queue_t *q)
+static void do_viocd_request(struct request_queue *q)
 {
 	struct request *req;
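[Editorial note, not part of the patch] The long run of do_*_request() conversions above all follow one request-based pattern: the block core invokes the function with the queue lock held, and the driver drains requests via elv_next_request(). A minimal sketch under the new spelling; mydev_transfer() is hypothetical, while elv_next_request(), end_request(), and blk_fs_request() all appear with these shapes elsewhere in this patch:

#include <linux/blkdev.h>

static int mydev_transfer(struct request *req)
{
	return 0;			/* hypothetical: 0 on success */
}

/* Sketch only: a request_fn, called with q->queue_lock held. It would
 * be wired up with q = blk_init_queue(do_mydev_request, &mydev_lock). */
static void do_mydev_request(struct request_queue *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(req)) {	/* skip non-fs requests */
			end_request(req, 0);
			continue;
		}
		end_request(req, mydev_transfer(req) == 0);
	}
}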
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 1486eb212ccc..ca843522f91d 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -3071,7 +3071,7 @@ static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
 /*
 * standard prep_rq_fn that builds 10 byte cmds
 */
-static int ide_cdrom_prep_fs(request_queue_t *q, struct request *rq)
+static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
 {
 	int hard_sect = queue_hardsect_size(q);
 	long block = (long)rq->hard_sector / (hard_sect >> 9);
@@ -3137,7 +3137,7 @@ static int ide_cdrom_prep_pc(struct request *rq)
 	return BLKPREP_OK;
 }
 
-static int ide_cdrom_prep_fn(request_queue_t *q, struct request *rq)
+static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
 {
 	if (blk_fs_request(rq))
 		return ide_cdrom_prep_fs(q, rq);
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index b1304a7f3e0a..5ce4216f72a2 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -679,7 +679,7 @@ static ide_proc_entry_t idedisk_proc[] = {
 };
 #endif	/* CONFIG_IDE_PROC_FS */
 
-static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
+static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
 {
 	ide_drive_t *drive = q->queuedata;
 
@@ -697,7 +697,7 @@ static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
 	rq->buffer = rq->cmd;
 }
 
-static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int idedisk_issue_flush(struct request_queue *q, struct gendisk *disk,
 			       sector_t *error_sector)
 {
 	ide_drive_t *drive = q->queuedata;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 484c50e71446..aa9f5f0b1e67 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -1327,7 +1327,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 /*
 * Passes the stuff to ide_do_request
 */
-void do_ide_request(request_queue_t *q)
+void do_ide_request(struct request_queue *q)
 {
 	ide_drive_t *drive = q->queuedata;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 5a4c5ea12f89..3a2a9a338fd9 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -945,7 +945,7 @@ static void save_match(ide_hwif_t *hwif, ide_hwif_t *new, ide_hwif_t **match)
 */
 static int ide_init_queue(ide_drive_t *drive)
 {
-	request_queue_t *q;
+	struct request_queue *q;
 	ide_hwif_t *hwif = HWIF(drive);
 	int max_sectors = 256;
 	int max_sg_entries = PRD_ENTRIES;
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
index 8f2db8dd35f7..8e05d88e81ba 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/ide/legacy/hd.c
@@ -652,7 +652,7 @@ repeat:
 	}
 }
 
-static void do_hd_request (request_queue_t * q)
+static void do_hd_request (struct request_queue * q)
 {
 	disable_irq(HD_IRQ);
 	hd_request();
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2fc199b0016b..2bcde5798b5a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -526,7 +526,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 
 void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 {
-	request_queue_t *q = bdev_get_queue(bdev);
+	struct request_queue *q = bdev_get_queue(bdev);
 	struct io_restrictions *rs = &ti->limits;
 
 	/*
@@ -979,7 +979,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 	devices = dm_table_get_devices(t);
 	for (d = devices->next; d != devices; d = d->next) {
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-		request_queue_t *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->bdev);
 
 		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
 	}
@@ -992,7 +992,7 @@ void dm_table_unplug_all(struct dm_table *t)
 	for (d = devices->next; d != devices; d = d->next) {
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-		request_queue_t *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->bdev);
 
 		if (q->unplug_fn)
 			q->unplug_fn(q);
@@ -1011,7 +1011,7 @@ int dm_table_flush_all(struct dm_table *t)
 	for (d = devices->next; d != devices; d = d->next) {
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-		request_queue_t *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->bdev);
 		int err;
 
 		if (!q->issue_flush_fn)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 846614e676c6..141ff9fa296e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -80,7 +80,7 @@ struct mapped_device {
 
 	unsigned long flags;
 
-	request_queue_t *queue;
+	struct request_queue *queue;
 	struct gendisk *disk;
 	char name[16];
@@ -792,7 +792,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
-static int dm_request(request_queue_t *q, struct bio *bio)
+static int dm_request(struct request_queue *q, struct bio *bio)
 {
 	int r;
 	int rw = bio_data_dir(bio);
@@ -844,7 +844,7 @@ static int dm_request(request_queue_t *q, struct bio *bio)
 	return 0;
 }
 
-static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
+static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
 			sector_t *error_sector)
 {
 	struct mapped_device *md = q->queuedata;
@@ -859,7 +859,7 @@ static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
 	return ret;
 }
 
-static void dm_unplug_all(request_queue_t *q)
+static void dm_unplug_all(struct request_queue *q)
 {
 	struct mapped_device *md = q->queuedata;
 	struct dm_table *map = dm_get_table(md);
@@ -1110,7 +1110,7 @@ static void __set_size(struct mapped_device *md, sector_t size)
 
 static int __bind(struct mapped_device *md, struct dm_table *t)
 {
-	request_queue_t *q = md->queue;
+	struct request_queue *q = md->queue;
 	sector_t size;
 
 	size = dm_table_get_size(t);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 4ebd0f2a75ec..cb059cf14c2e 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -167,7 +167,7 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
 	conf->nfaults = n+1;
 }
 
-static int make_request(request_queue_t *q, struct bio *bio)
+static int make_request(struct request_queue *q, struct bio *bio)
 {
 	mddev_t *mddev = q->queuedata;
 	conf_t *conf = (conf_t*)mddev->private;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 192741083196..17f795c3e0ab 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -55,7 +55,7 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
 *
 *	Return amount of bytes we can take at this offset
 */
-static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int linear_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
 	dev_info_t *dev0;
@@ -79,20 +79,20 @@ static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio
 	return maxsectors << 9;
 }
 
-static void linear_unplug(request_queue_t *q)
+static void linear_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
 	linear_conf_t *conf = mddev_to_conf(mddev);
 	int i;
 
 	for (i=0; i < mddev->raid_disks; i++) {
-		request_queue_t *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
+		struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
 		if (r_queue->unplug_fn)
 			r_queue->unplug_fn(r_queue);
 	}
 }
 
-static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
 			      sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
@@ -101,7 +101,7 @@ static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
 
 	for (i=0; i < mddev->raid_disks && ret == 0; i++) {
 		struct block_device *bdev = conf->disks[i].rdev->bdev;
-		request_queue_t *r_queue = bdev_get_queue(bdev);
+		struct request_queue *r_queue = bdev_get_queue(bdev);
 
 		if (!r_queue->issue_flush_fn)
 			ret = -EOPNOTSUPP;
@@ -118,7 +118,7 @@ static int linear_congested(void *data, int bits)
 	int i, ret = 0;
 
 	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
-		request_queue_t *q = bdev_get_queue(conf->disks[i].rdev->bdev);
+		struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
 		ret |= bdi_congested(&q->backing_dev_info, bits);
 	}
 	return ret;
@@ -330,7 +330,7 @@ static int linear_stop (mddev_t *mddev)
 	return 0;
 }
 
-static int linear_make_request (request_queue_t *q, struct bio *bio)
+static int linear_make_request (struct request_queue *q, struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
 	mddev_t *mddev = q->queuedata;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 65ddc887dfd7..f883b7e37f3d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -211,7 +211,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
 	)
 
 
-static int md_fail_request (request_queue_t *q, struct bio *bio)
+static int md_fail_request (struct request_queue *q, struct bio *bio)
 {
 	bio_io_error(bio, bio->bi_size);
 	return 0;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 14da37fee37b..1e2af43a73b9 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -125,7 +125,7 @@ static void unplug_slaves(mddev_t *mddev)
 		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)
 		    && atomic_read(&rdev->nr_pending)) {
-			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
 
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
@@ -140,13 +140,13 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_unlock();
 }
 
-static void multipath_unplug(request_queue_t *q)
+static void multipath_unplug(struct request_queue *q)
 {
 	unplug_slaves(q->queuedata);
 }
 
 
-static int multipath_make_request (request_queue_t *q, struct bio * bio)
+static int multipath_make_request (struct request_queue *q, struct bio * bio)
 {
 	mddev_t *mddev = q->queuedata;
 	multipath_conf_t *conf = mddev_to_conf(mddev);
@@ -199,7 +199,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
 	seq_printf (seq, "]");
 }
 
-static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
 				 sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
@@ -211,7 +211,7 @@ static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
 		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct block_device *bdev = rdev->bdev;
-			request_queue_t *r_queue = bdev_get_queue(bdev);
+			struct request_queue *r_queue = bdev_get_queue(bdev);
 
 			if (!r_queue->issue_flush_fn)
 				ret = -EOPNOTSUPP;
@@ -238,7 +238,7 @@ static int multipath_congested(void *data, int bits)
 	for (i = 0; i < mddev->raid_disks ; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			request_queue_t *q = bdev_get_queue(rdev->bdev);
+			struct request_queue *q = bdev_get_queue(rdev->bdev);
 
 			ret |= bdi_congested(&q->backing_dev_info, bits);
 			/* Just like multipath_map, we just check the
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 2c404f73a377..b8216bc6db45 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -25,7 +25,7 @@
 #define MD_DRIVER
 #define MD_PERSONALITY
 
-static void raid0_unplug(request_queue_t *q)
+static void raid0_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
 	raid0_conf_t *conf = mddev_to_conf(mddev);
@@ -33,14 +33,14 @@ static void raid0_unplug(request_queue_t *q)
 	int i;
 
 	for (i=0; i<mddev->raid_disks; i++) {
-		request_queue_t *r_queue = bdev_get_queue(devlist[i]->bdev);
+		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
 
 		if (r_queue->unplug_fn)
 			r_queue->unplug_fn(r_queue);
 	}
 }
 
-static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
 			     sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
@@ -50,7 +50,7 @@ static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
 
 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
 		struct block_device *bdev = devlist[i]->bdev;
-		request_queue_t *r_queue = bdev_get_queue(bdev);
+		struct request_queue *r_queue = bdev_get_queue(bdev);
 
 		if (!r_queue->issue_flush_fn)
 			ret = -EOPNOTSUPP;
@@ -68,7 +68,7 @@ static int raid0_congested(void *data, int bits)
 	int i, ret = 0;
 
 	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
-		request_queue_t *q = bdev_get_queue(devlist[i]->bdev);
+		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
 
 		ret |= bdi_congested(&q->backing_dev_info, bits);
 	}
@@ -268,7 +268,7 @@ static int create_strip_zones (mddev_t *mddev)
 *
 *	Return amount of bytes we can accept at this offset
 */
-static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid0_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
 	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
@@ -408,7 +408,7 @@ static int raid0_stop (mddev_t *mddev)
 	return 0;
 }
 
-static int raid0_make_request (request_queue_t *q, struct bio *bio)
+static int raid0_make_request (struct request_queue *q, struct bio *bio)
 {
 	mddev_t *mddev = q->queuedata;
 	unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 00c78b77b13d..650991bddd8e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -552,7 +552,7 @@ static void unplug_slaves(mddev_t *mddev)
 	for (i=0; i<mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
 
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
@@ -567,7 +567,7 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_unlock();
 }
 
-static void raid1_unplug(request_queue_t *q)
+static void raid1_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
 
@@ -575,7 +575,7 @@ static void raid1_unplug(request_queue_t *q)
 	md_wakeup_thread(mddev->thread);
 }
 
-static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
 			     sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
@@ -587,7 +587,7 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct block_device *bdev = rdev->bdev;
-			request_queue_t *r_queue = bdev_get_queue(bdev);
+			struct request_queue *r_queue = bdev_get_queue(bdev);
 
 			if (!r_queue->issue_flush_fn)
 				ret = -EOPNOTSUPP;
@@ -615,7 +615,7 @@ static int raid1_congested(void *data, int bits)
 	for (i = 0; i < mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			request_queue_t *q = bdev_get_queue(rdev->bdev);
+			struct request_queue *q = bdev_get_queue(rdev->bdev);
 
 			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
@@ -765,7 +765,7 @@ do_sync_io:
 	return NULL;
 }
 
-static int make_request(request_queue_t *q, struct bio * bio)
+static int make_request(struct request_queue *q, struct bio * bio)
 {
 	mddev_t *mddev = q->queuedata;
 	conf_t *conf = mddev_to_conf(mddev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a95ada1cfac4..f730a144baf1 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -453,7 +453,7 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
 * If near_copies == raid_disk, there are no striping issues,
 * but in that case, the function isn't called at all.
 */
-static int raid10_mergeable_bvec(request_queue_t *q, struct bio *bio,
+static int raid10_mergeable_bvec(struct request_queue *q, struct bio *bio,
 				 struct bio_vec *bio_vec)
 {
 	mddev_t *mddev = q->queuedata;
@@ -595,7 +595,7 @@ static void unplug_slaves(mddev_t *mddev)
 	for (i=0; i<mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
 
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
@@ -610,7 +610,7 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_unlock();
 }
 
-static void raid10_unplug(request_queue_t *q)
+static void raid10_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
 
@@ -618,7 +618,7 @@ static void raid10_unplug(request_queue_t *q)
 	md_wakeup_thread(mddev->thread);
 }
 
-static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
 			      sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
@@ -630,7 +630,7 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct block_device *bdev = rdev->bdev;
-			request_queue_t *r_queue = bdev_get_queue(bdev);
+			struct request_queue *r_queue = bdev_get_queue(bdev);
 
 			if (!r_queue->issue_flush_fn)
 				ret = -EOPNOTSUPP;
@@ -658,7 +658,7 @@ static int raid10_congested(void *data, int bits)
 	for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			request_queue_t *q = bdev_get_queue(rdev->bdev);
+			struct request_queue *q = bdev_get_queue(rdev->bdev);
 
 			ret |= bdi_congested(&q->backing_dev_info, bits);
 		}
@@ -772,7 +772,7 @@ static void unfreeze_array(conf_t *conf)
 	spin_unlock_irq(&conf->resync_lock);
 }
 
-static int make_request(request_queue_t *q, struct bio * bio)
+static int make_request(struct request_queue *q, struct bio * bio)
 {
 	mddev_t *mddev = q->queuedata;
 	conf_t *conf = mddev_to_conf(mddev);
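[Editorial note, not part of the patch] The md personalities converted above (linear, multipath, raid0/1/10, with raid5 next) repeat one idiom: resolve each member device's queue with bdev_get_queue() and then poke its unplug_fn, issue_flush_fn, or backing_dev_info. A sketch of the congestion half of that idiom, with the member array and count as hypothetical parameters:

#include <linux/blkdev.h>

/* Sketch only: OR together the congestion state of every member
 * device's queue, as the *_congested() hunks above do. */
static int mystack_congested(struct block_device **members, int n, int bits)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		struct request_queue *q = bdev_get_queue(members[i]);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}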
*/ -static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec) +static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec) { mddev_t *mddev = q->queuedata; sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); @@ -3377,7 +3377,7 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error) static int bio_fits_rdev(struct bio *bi) { - request_queue_t *q = bdev_get_queue(bi->bi_bdev); + struct request_queue *q = bdev_get_queue(bi->bi_bdev); if ((bi->bi_size>>9) > q->max_sectors) return 0; @@ -3396,7 +3396,7 @@ static int bio_fits_rdev(struct bio *bi) } -static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio) +static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio) { mddev_t *mddev = q->queuedata; raid5_conf_t *conf = mddev_to_conf(mddev); @@ -3466,7 +3466,7 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio) } -static int make_request(request_queue_t *q, struct bio * bi) +static int make_request(struct request_queue *q, struct bio * bi) { mddev_t *mddev = q->queuedata; raid5_conf_t *conf = mddev_to_conf(mddev); diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index 988c8ce47f58..5e1c99f83ab5 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -159,7 +159,7 @@ static int i2o_block_device_flush(struct i2o_device *dev) * Returns 0 on success or negative error code on failure. */ -static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk, +static int i2o_block_issue_flush(struct request_queue * queue, struct gendisk *disk, sector_t * error_sector) { struct i2o_block_device *i2o_blk_dev = queue->queuedata; @@ -445,7 +445,7 @@ static void i2o_block_end_request(struct request *req, int uptodate, { struct i2o_block_request *ireq = req->special; struct i2o_block_device *dev = ireq->i2o_blk_dev; - request_queue_t *q = req->q; + struct request_queue *q = req->q; unsigned long flags; if (end_that_request_chunk(req, uptodate, nr_bytes)) { diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index b53dac8d1b69..e02eac876362 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -83,7 +83,7 @@ static int mmc_queue_thread(void *d) * on any queue on this host, and attempt to issue it. This may * not be the queue we were asked to process. 
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 988c8ce47f58..5e1c99f83ab5 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -159,7 +159,7 @@ static int i2o_block_device_flush(struct i2o_device *dev)
  *	Returns 0 on success or negative error code on failure.
  */
 
-static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk,
+static int i2o_block_issue_flush(struct request_queue * queue, struct gendisk *disk,
 				 sector_t * error_sector)
 {
 	struct i2o_block_device *i2o_blk_dev = queue->queuedata;
@@ -445,7 +445,7 @@ static void i2o_block_end_request(struct request *req, int uptodate,
 {
 	struct i2o_block_request *ireq = req->special;
 	struct i2o_block_device *dev = ireq->i2o_blk_dev;
-	request_queue_t *q = req->q;
+	struct request_queue *q = req->q;
 	unsigned long flags;
 
 	if (end_that_request_chunk(req, uptodate, nr_bytes)) {
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index b53dac8d1b69..e02eac876362 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -83,7 +83,7 @@ static int mmc_queue_thread(void *d)
  * on any queue on this host, and attempt to issue it. This may
  * not be the queue we were asked to process.
  */
-static void mmc_request(request_queue_t *q)
+static void mmc_request(struct request_queue *q)
 {
 	struct mmc_queue *mq = q->queuedata;
 	struct request *req;
@@ -211,7 +211,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
 void mmc_cleanup_queue(struct mmc_queue *mq)
 {
-	request_queue_t *q = mq->queue;
+	struct request_queue *q = mq->queue;
 	unsigned long flags;
 
 	/* Mark that we should start throwing out stragglers */
@@ -252,7 +252,7 @@ EXPORT_SYMBOL(mmc_cleanup_queue);
  */
 void mmc_queue_suspend(struct mmc_queue *mq)
 {
-	request_queue_t *q = mq->queue;
+	struct request_queue *q = mq->queue;
 	unsigned long flags;
 
 	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
@@ -272,7 +272,7 @@ void mmc_queue_suspend(struct mmc_queue *mq)
  */
 void mmc_queue_resume(struct mmc_queue *mq)
 {
-	request_queue_t *q = mq->queue;
+	struct request_queue *q = mq->queue;
 	unsigned long flags;
 
 	if (mq->flags & MMC_QUEUE_SUSPENDED) {
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index bfeca57098fa..e6bfce690ca3 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1187,7 +1187,7 @@ dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
 static void
 __dasd_process_blk_queue(struct dasd_device * device)
 {
-	request_queue_t *queue;
+	struct request_queue *queue;
 	struct request *req;
 	struct dasd_ccw_req *cqr;
 	int nr_queued;
@@ -1740,7 +1740,7 @@ dasd_cancel_req(struct dasd_ccw_req *cqr)
  * Dasd request queue function. Called from ll_rw_blk.c
  */
 static void
-do_dasd_request(request_queue_t * queue)
+do_dasd_request(struct request_queue * queue)
 {
 	struct dasd_device *device;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 241294cba415..aeda52682446 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -293,7 +293,7 @@ struct dasd_uid {
 struct dasd_device {
 	/* Block device stuff. */
 	struct gendisk *gdp;
-	request_queue_t *request_queue;
+	struct request_queue *request_queue;
 	spinlock_t request_queue_lock;
 	struct block_device *bdev;
 	unsigned int devindex;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 35765f6a86e0..4d8798bacf97 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -621,7 +621,7 @@ out:
 }
 
 static int
-dcssblk_make_request(request_queue_t *q, struct bio *bio)
+dcssblk_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct dcssblk_dev_info *dev_info;
 	struct bio_vec *bvec;
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index a04d9120cef0..354a060e5bec 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -191,7 +191,7 @@ static unsigned long __init xpram_highest_page_index(void)
 /*
  * Block device make request function.
  */
-static int xpram_make_request(request_queue_t *q, struct bio *bio)
+static int xpram_make_request(struct request_queue *q, struct bio *bio)
 {
 	xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
 	struct bio_vec *bvec;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 3b52f5c1dbef..dddf8d62c153 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -188,7 +188,7 @@ struct tape_blk_data {
 	struct tape_device *	device;
 	/* Block device request queue. */
-	request_queue_t *	request_queue;
+	struct request_queue *	request_queue;
 	spinlock_t		request_queue_lock;
 
 	/* Task to move entries from block request to CCS request queue. */
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index dd0ecaed592e..eeb92e2ed0cc 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -147,7 +147,7 @@ static void
 tapeblock_requeue(struct work_struct *work) {
 	struct tape_blk_data *	blkdat;
 	struct tape_device *	device;
-	request_queue_t *	queue;
+	struct request_queue *	queue;
 	int			nr_queued;
 	struct request *	req;
 	struct list_head *	l;
@@ -194,7 +194,7 @@ tapeblock_requeue(struct work_struct *work) {
  * Tape request queue function. Called from ll_rw_blk.c
  */
 static void
-tapeblock_request_fn(request_queue_t *queue)
+tapeblock_request_fn(struct request_queue *queue)
 {
 	struct tape_device *device;
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 5157a2abc58d..4b7079fdc10c 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -185,7 +185,7 @@ static void jsfd_read(char *buf, unsigned long p, size_t togo) {
 	}
 }
 
-static void jsfd_do_request(request_queue_t *q)
+static void jsfd_do_request(struct request_queue *q)
 {
 	struct request *req;
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index da63c544919b..21c075d44db1 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -654,7 +654,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
 static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
 					  int bytes, int requeue)
 {
-	request_queue_t *q = cmd->device->request_queue;
+	struct request_queue *q = cmd->device->request_queue;
 	struct request *req = cmd->request;
 	unsigned long flags;
 
@@ -818,7 +818,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 {
 	int result = cmd->result;
 	int this_count = cmd->request_bufflen;
-	request_queue_t *q = cmd->device->request_queue;
+	struct request_queue *q = cmd->device->request_queue;
 	struct request *req = cmd->request;
 	int clear_errors = 1;
 	struct scsi_sense_hdr sshdr;
@@ -1038,7 +1038,7 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 	return BLKPREP_KILL;
 }
 
-static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
+static int scsi_issue_flush_fn(struct request_queue *q, struct gendisk *disk,
 			       sector_t *error_sector)
 {
 	struct scsi_device *sdev = q->queuedata;
@@ -1340,7 +1340,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
 /*
  * Kill a request for a dead device
  */
-static void scsi_kill_request(struct request *req, request_queue_t *q)
+static void scsi_kill_request(struct request *req, struct request_queue *q)
 {
 	struct scsi_cmnd *cmd = req->special;
 	struct scsi_device *sdev = cmd->device;
@@ -2119,7 +2119,7 @@ EXPORT_SYMBOL(scsi_target_resume);
 int
 scsi_internal_device_block(struct scsi_device *sdev)
 {
-	request_queue_t *q = sdev->request_queue;
+	struct request_queue *q = sdev->request_queue;
 	unsigned long flags;
 	int err = 0;
 
@@ -2159,7 +2159,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
 int
 scsi_internal_device_unblock(struct scsi_device *sdev)
 {
-	request_queue_t *q = sdev->request_queue;
+	struct request_queue *q = sdev->request_queue;
 	int err;
 	unsigned long flags;
 
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 424d557284a9..e21c7142a3ea 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -814,7 +814,7 @@ static int sd_issue_flush(struct device *dev, sector_t *error_sector)
 	return ret;
 }
 
-static void sd_prepare_flush(request_queue_t *q, struct request *rq)
+static void sd_prepare_flush(struct request_queue *q, struct request *rq)
 {
 	memset(rq->cmd, 0, sizeof(rq->cmd));
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1285,7 +1285,7 @@ got_data:
 		 */
 		int hard_sector = sector_size;
 		sector_t sz = (sdkp->capacity/2) * (hard_sector/256);
-		request_queue_t *queue = sdp->request_queue;
+		struct request_queue *queue = sdp->request_queue;
 		sector_t mb = sz;
 
 		blk_queue_hardsect_size(queue, hard_sector);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index e7b6a7fde1cb..902eb11ffe8a 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -624,7 +624,7 @@ static void get_sectorsize(struct scsi_cd *cd)
 	unsigned char *buffer;
 	int the_result, retries = 3;
 	int sector_size;
-	request_queue_t *queue;
+	struct request_queue *queue;
 
 	buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
 	if (!buffer)
diff --git a/fs/bio.c b/fs/bio.c
@@ -230,7 +230,7 @@ void bio_put(struct bio *bio)
 	}
 }
 
-inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
+inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 {
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 		blk_recount_segments(q, bio);
@@ -238,7 +238,7 @@ inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
 	return bio->bi_phys_segments;
 }
 
-inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
+inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
 {
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 		blk_recount_segments(q, bio);
@@ -257,7 +257,7 @@ inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
  */
 void __bio_clone(struct bio *bio, struct bio *bio_src)
 {
-	request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
+	struct request_queue *q = bdev_get_queue(bio_src->bi_bdev);
 
 	memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
 		bio_src->bi_max_vecs * sizeof(struct bio_vec));
@@ -303,7 +303,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
  */
 int bio_get_nr_vecs(struct block_device *bdev)
 {
-	request_queue_t *q = bdev_get_queue(bdev);
+	struct request_queue *q = bdev_get_queue(bdev);
 	int nr_pages;
 
 	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -315,7 +315,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
 	return nr_pages;
 }
 
-static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
+static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 			  *page, unsigned int len, unsigned int offset,
 			  unsigned short max_sectors)
 {
@@ -425,7 +425,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
  *	smaller than PAGE_SIZE, so it is always possible to add a single
  *	page to an empty bio. This should only be used by REQ_PC bios.
  */
-int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
+int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
 		    unsigned int len, unsigned int offset)
 {
 	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
@@ -523,7 +523,7 @@ int bio_uncopy_user(struct bio *bio)
  *	to/from kernel pages as necessary. Must be paired with
 *	call bio_uncopy_user() on io completion.
 */
-struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
+struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
 			  unsigned int len, int write_to_vm)
 {
 	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -600,7 +600,7 @@ out_bmd:
 	return ERR_PTR(ret);
 }
 
-static struct bio *__bio_map_user_iov(request_queue_t *q,
+static struct bio *__bio_map_user_iov(struct request_queue *q,
 				      struct block_device *bdev,
 				      struct sg_iovec *iov, int iov_count,
 				      int write_to_vm)
@@ -712,7 +712,7 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 
 /**
  *	bio_map_user	-	map user address into bio
- *	@q: the request_queue_t for the bio
+ *	@q: the struct request_queue for the bio
 *	@bdev: destination block device
 *	@uaddr: start of user address
 *	@len: length in bytes
@@ -721,7 +721,7 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
-struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
 			 unsigned long uaddr, unsigned int len, int write_to_vm)
 {
 	struct sg_iovec iov;
@@ -734,7 +734,7 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
 
 /**
  *	bio_map_user_iov - map user sg_iovec table into bio
- *	@q: the request_queue_t for the bio
+ *	@q: the struct request_queue for the bio
 *	@bdev: destination block device
 *	@iov:	the iovec.
 *	@iov_count: number of elements in the iovec
@@ -743,7 +743,7 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
-struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
 			     struct sg_iovec *iov, int iov_count,
 			     int write_to_vm)
 {
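As the kernel-doc above says, bio_map_user() pins a user buffer and wraps it in a bio; the following hedged fragment shows a typical caller. It is a hypothetical driver path, not code from this patch, and q, bdev, uaddr and len are assumed to come from the surrounding code:

    /* Hypothetical driver fragment: map a user buffer for a transfer. */
    struct bio *bio;

    bio = bio_map_user(q, bdev, uaddr, len, 1); /* 1: device writes to user memory */
    if (IS_ERR(bio))
            return PTR_ERR(bio);

    /* ... submit the bio and wait for completion ... */

    bio_unmap_user(bio);        /* drop the page pins once the I/O is done */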
@@ -808,7 +808,7 @@ static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
 }
 
 
-static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+static struct bio *__bio_map_kern(struct request_queue *q, void *data,
 				  unsigned int len, gfp_t gfp_mask)
 {
 	unsigned long kaddr = (unsigned long)data;
@@ -847,7 +847,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
 
 /**
  *	bio_map_kern	-	map kernel address into bio
- *	@q: the request_queue_t for the bio
+ *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to map
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio allocation
@@ -855,7 +855,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
 *	Map the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
-struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
 			 gfp_t gfp_mask)
 {
 	struct bio *bio;
diff --git a/include/asm-arm/arch-omap/mailbox.h b/include/asm-arm/arch-omap/mailbox.h
index 4bf0909461f2..7cbed9332e16 100644
--- a/include/asm-arm/arch-omap/mailbox.h
+++ b/include/asm-arm/arch-omap/mailbox.h
@@ -37,7 +37,7 @@ struct omap_mbox_ops {
 
 struct omap_mbox_queue {
 	spinlock_t		lock;
-	request_queue_t		*queue;
+	struct request_queue	*queue;
 	struct work_struct	work;
 	int	(*callback)(void *);
 	struct omap_mbox	*mbox;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 695e34964cb7..a1c96d9ee720 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -37,7 +37,7 @@ struct scsi_ioctl_command;
 
 struct request_queue;
-typedef struct request_queue request_queue_t;
+typedef struct request_queue request_queue_t __deprecated;
 struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
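The hunk above keeps the old typedef alive but marks it __deprecated, which expands to gcc's __attribute__((deprecated)): remaining users still compile, but every use now emits a warning pointing at the offender. A small standalone illustration of the mechanism, independent of the kernel headers:

    #include <stdio.h>

    struct request_queue { int nr_requests; };
    typedef struct request_queue request_queue_t __attribute__((deprecated));

    static int old_style(request_queue_t *q)        /* warns: 'request_queue_t' is deprecated */
    {
            return q->nr_requests;
    }

    static int new_style(struct request_queue *q)   /* no warning */
    {
            return q->nr_requests;
    }

    int main(void)
    {
            struct request_queue q = { 128 };
            printf("%d %d\n", old_style(&q), new_style(&q));
            return 0;
    }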
@@ -233,7 +233,7 @@ struct request {
 	struct list_head queuelist;
 	struct list_head donelist;
 
-	request_queue_t *q;
+	struct request_queue *q;
 
 	unsigned int cmd_flags;
 	enum rq_cmd_type_bits cmd_type;
@@ -337,15 +337,15 @@ struct request_pm_state
 
 #include <linux/elevator.h>
 
-typedef void (request_fn_proc) (request_queue_t *q);
-typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
-typedef int (prep_rq_fn) (request_queue_t *, struct request *);
-typedef void (unplug_fn) (request_queue_t *);
+typedef void (request_fn_proc) (struct request_queue *q);
+typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef int (prep_rq_fn) (struct request_queue *, struct request *);
+typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
-typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
-typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
-typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
+typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
+typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *);
+typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 
 enum blk_queue_state {
@@ -626,13 +626,13 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
-extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
+extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
 #else
 static inline int init_emergency_isa_pool(void)
 {
 	return 0;
 }
-static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
+static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 {
 }
 #endif /* CONFIG_MMU */
@@ -646,14 +646,14 @@ extern void blk_unregister_queue(struct gendisk *disk);
 extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
-extern void __blk_put_request(request_queue_t *, struct request *);
+extern void __blk_put_request(struct request_queue *, struct request *);
 extern void blk_end_sync_rq(struct request *rq, int error);
-extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
-extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
-extern void blk_requeue_request(request_queue_t *, struct request *);
-extern void blk_plug_device(request_queue_t *);
-extern int blk_remove_plug(request_queue_t *);
-extern void blk_recount_segments(request_queue_t *, struct bio *);
+extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
+extern void blk_requeue_request(struct request_queue *, struct request *);
+extern void blk_plug_device(struct request_queue *);
+extern int blk_remove_plug(struct request_queue *);
+extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
 		struct gendisk *, unsigned int, void __user *);
 extern int sg_scsi_ioctl(struct file *, struct request_queue *,
@@ -662,14 +662,15 @@ extern int sg_scsi_ioctl(struct file *, struct request_queue *,
 /*
  * Temporary export, until SCSI gets fixed up.
  */
-extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *);
+extern int ll_back_merge_fn(struct request_queue *, struct request *,
+			    struct bio *);
 
 /*
 * A queue has just exitted congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
-static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
+static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
 {
 	clear_bdi_congested(&q->backing_dev_info, rw);
 }
@@ -678,29 +679,29 @@ static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global gounter of congested queues.
 */
-static inline void blk_set_queue_congested(request_queue_t *q, int rw)
+static inline void blk_set_queue_congested(struct request_queue *q, int rw)
 {
 	set_bdi_congested(&q->backing_dev_info, rw);
 }
 
-extern void blk_start_queue(request_queue_t *q);
-extern void blk_stop_queue(request_queue_t *q);
+extern void blk_start_queue(struct request_queue *q);
+extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
-extern void __blk_stop_queue(request_queue_t *q);
-extern void blk_run_queue(request_queue_t *);
-extern void blk_start_queueing(request_queue_t *);
-extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
+extern void __blk_stop_queue(struct request_queue *q);
+extern void blk_run_queue(struct request_queue *);
+extern void blk_start_queueing(struct request_queue *);
+extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
 extern int blk_rq_unmap_user(struct bio *);
-extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
+extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
+extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 			       struct sg_iovec *, int, unsigned int);
-extern int blk_execute_rq(request_queue_t *, struct gendisk *,
+extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
-extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
+extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
 extern int blk_verify_command(unsigned char *, int);
 
-static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
+static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
 	return bdev->bd_disk->queue;
 }
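With the hook typedefs above now spelled out in terms of struct request_queue, a driver's request function is declared directly against the struct. A hedged sketch of a minimal queue setup follows; the mydev_* names and the trivial transfer loop are hypothetical, not part of this patch:

    /* Hypothetical "mydev" driver fragment showing the new prototypes in use. */
    static DEFINE_SPINLOCK(mydev_lock);

    /* request_fn_proc: the argument is now struct request_queue *. */
    static void mydev_request(struct request_queue *q)
    {
            struct request *req;

            while ((req = elv_next_request(q)) != NULL) {
                    /* ... carry out the transfer described by req ... */
                    end_request(req, 1);    /* 1 == I/O completed OK */
            }
    }

    static int __init mydev_init(void)
    {
            struct request_queue *q;

            q = blk_init_queue(mydev_request, &mydev_lock);
            if (!q)
                    return -ENOMEM;
            /* ... attach q to a gendisk and register the disk ... */
            return 0;
    }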
@@ -749,41 +750,41 @@ static inline void blkdev_dequeue_request(struct request *req)
 /*
  * Access functions for manipulating queue properties
  */
-extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
+extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
 					spinlock_t *lock, int node_id);
-extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern void blk_cleanup_queue(request_queue_t *);
-extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
-extern void blk_queue_bounce_limit(request_queue_t *, u64);
-extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
-extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
-extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
-extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
-extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
-extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b);
-extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
-extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
-extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
-extern void blk_queue_dma_alignment(request_queue_t *, int);
-extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
+extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern void blk_cleanup_queue(struct request_queue *);
+extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
+extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
+extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
+extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
+extern void blk_queue_dma_alignment(struct request_queue *, int);
+extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
-extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern int blk_do_ordered(request_queue_t *, struct request **);
-extern unsigned blk_ordered_cur_seq(request_queue_t *);
+extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
+extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *);
+extern int blk_do_ordered(struct request_queue *, struct request **);
+extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
+extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
-extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
+extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
-extern void generic_unplug_device(request_queue_t *);
-extern void __generic_unplug_device(request_queue_t *);
+extern void generic_unplug_device(struct request_queue *);
+extern void __generic_unplug_device(struct request_queue *);
 extern long nr_blockdev_pages(void);
 
-int blk_get_queue(request_queue_t *);
-request_queue_t *blk_alloc_queue(gfp_t);
-request_queue_t *blk_alloc_queue_node(gfp_t, int);
-extern void blk_put_queue(request_queue_t *);
+int blk_get_queue(struct request_queue *);
+struct request_queue *blk_alloc_queue(gfp_t);
+struct request_queue *blk_alloc_queue_node(gfp_t, int);
+extern void blk_put_queue(struct request_queue *);
 
 /*
  * tag stuff
@@ -791,13 +792,13 @@ extern void blk_put_queue(request_queue_t *);
 #define blk_queue_tag_depth(q)		((q)->queue_tags->busy)
 #define blk_queue_tag_queue(q)		((q)->queue_tags->busy < (q)->queue_tags->max_depth)
 #define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
-extern int blk_queue_start_tag(request_queue_t *, struct request *);
-extern struct request *blk_queue_find_tag(request_queue_t *, int);
-extern void blk_queue_end_tag(request_queue_t *, struct request *);
-extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
-extern void blk_queue_free_tags(request_queue_t *);
-extern int blk_queue_resize_tags(request_queue_t *, int);
-extern void blk_queue_invalidate_tags(request_queue_t *);
+extern int blk_queue_start_tag(struct request_queue *, struct request *);
+extern struct request *blk_queue_find_tag(struct request_queue *, int);
+extern void blk_queue_end_tag(struct request_queue *, struct request *);
+extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
+extern void blk_queue_free_tags(struct request_queue *);
+extern int blk_queue_resize_tags(struct request_queue *, int);
+extern void blk_queue_invalidate_tags(struct request_queue *);
 extern struct blk_queue_tag *blk_init_tags(int);
 extern void blk_free_tags(struct blk_queue_tag *);
 
@@ -809,7 +810,7 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 	return bqt->tag_index[tag];
 }
 
-extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
+extern void blk_rq_bio_prep(struct request_queue *, struct request *, struct bio *);
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define MAX_PHYS_SEGMENTS 128
@@ -821,7 +822,7 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
-static inline int queue_hardsect_size(request_queue_t *q)
+static inline int queue_hardsect_size(struct request_queue *q)
 {
 	int retval = 512;
 
@@ -836,7 +837,7 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
 	return queue_hardsect_size(bdev_get_queue(bdev));
 }
 
-static inline int queue_dma_alignment(request_queue_t *q)
+static inline int queue_dma_alignment(struct request_queue *q)
 {
 	int retval = 511;
 
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3680ff9a30ed..90874a5d7d78 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -144,7 +144,7 @@ struct blk_user_trace_setup {
 
 #if defined(CONFIG_BLK_DEV_IO_TRACE)
 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
-extern void blk_trace_shutdown(request_queue_t *);
+extern void blk_trace_shutdown(struct request_queue *);
 extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
 
 /**
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index e88fcbc77f8f..e8f42133a616 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -5,29 +5,29 @@
 
 #ifdef CONFIG_BLOCK
 
-typedef int (elevator_merge_fn) (request_queue_t *, struct request **,
+typedef int (elevator_merge_fn) (struct request_queue *, struct request **,
 				 struct bio *);
 
-typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struct request *);
+typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);
 
-typedef void (elevator_merged_fn) (request_queue_t *, struct request *, int);
+typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int);
 
-typedef int (elevator_allow_merge_fn) (request_queue_t *, struct request *, struct bio *);
+typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *);
 
-typedef int (elevator_dispatch_fn) (request_queue_t *, int);
+typedef int (elevator_dispatch_fn) (struct request_queue *, int);
 
-typedef void (elevator_add_req_fn) (request_queue_t *, struct request *);
-typedef int (elevator_queue_empty_fn) (request_queue_t *);
-typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
-typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
-typedef int (elevator_may_queue_fn) (request_queue_t *, int);
+typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
+typedef int (elevator_queue_empty_fn) (struct request_queue *);
+typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
+typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
+typedef int (elevator_may_queue_fn) (struct request_queue *, int);
 
-typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, gfp_t);
+typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t);
 typedef void (elevator_put_req_fn) (struct request *);
-typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
-typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
+typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
+typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
 
-typedef void *(elevator_init_fn) (request_queue_t *);
+typedef void *(elevator_init_fn) (struct request_queue *);
 typedef void (elevator_exit_fn) (elevator_t *);
 
 struct elevator_ops
@@ -94,27 +94,27 @@ struct elevator_queue
 /*
  * block elevator interface
 */
-extern void elv_dispatch_sort(request_queue_t *, struct request *);
-extern void elv_dispatch_add_tail(request_queue_t *, struct request *);
-extern void elv_add_request(request_queue_t *, struct request *, int, int);
-extern void __elv_add_request(request_queue_t *, struct request *, int, int);
-extern void elv_insert(request_queue_t *, struct request *, int);
-extern int elv_merge(request_queue_t *, struct request **, struct bio *);
-extern void elv_merge_requests(request_queue_t *, struct request *,
+extern void elv_dispatch_sort(struct request_queue *, struct request *);
+extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
+extern void elv_add_request(struct request_queue *, struct request *, int, int);
+extern void __elv_add_request(struct request_queue *, struct request *, int, int);
+extern void elv_insert(struct request_queue *, struct request *, int);
+extern int elv_merge(struct request_queue *, struct request **, struct bio *);
+extern void elv_merge_requests(struct request_queue *, struct request *,
 			       struct request *);
-extern void elv_merged_request(request_queue_t *, struct request *, int);
-extern void elv_dequeue_request(request_queue_t *, struct request *);
-extern void elv_requeue_request(request_queue_t *, struct request *);
-extern int elv_queue_empty(request_queue_t *);
+extern void elv_merged_request(struct request_queue *, struct request *, int);
+extern void elv_dequeue_request(struct request_queue *, struct request *);
+extern void elv_requeue_request(struct request_queue *, struct request *);
+extern int elv_queue_empty(struct request_queue *);
 extern struct request *elv_next_request(struct request_queue *q);
-extern struct request *elv_former_request(request_queue_t *, struct request *);
-extern struct request *elv_latter_request(request_queue_t *, struct request *);
-extern int elv_register_queue(request_queue_t *q);
-extern void elv_unregister_queue(request_queue_t *q);
-extern int elv_may_queue(request_queue_t *, int);
-extern void elv_completed_request(request_queue_t *, struct request *);
-extern int elv_set_request(request_queue_t *, struct request *, gfp_t);
-extern void elv_put_request(request_queue_t *, struct request *);
+extern struct request *elv_former_request(struct request_queue *, struct request *);
+extern struct request *elv_latter_request(struct request_queue *, struct request *);
+extern int elv_register_queue(struct request_queue *q);
+extern void elv_unregister_queue(struct request_queue *q);
+extern int elv_may_queue(struct request_queue *, int);
+extern void elv_completed_request(struct request_queue *, struct request *);
+extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
+extern void elv_put_request(struct request_queue *, struct request *);
 
 /*
  * io scheduler registration
@@ -125,18 +125,18 @@ extern void elv_unregister(struct elevator_type *);
 /*
  * io scheduler sysfs switching
 */
-extern ssize_t elv_iosched_show(request_queue_t *, char *);
-extern ssize_t elv_iosched_store(request_queue_t *, const char *, size_t);
+extern ssize_t elv_iosched_show(struct request_queue *, char *);
+extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 
-extern int elevator_init(request_queue_t *, char *);
+extern int elevator_init(struct request_queue *, char *);
 extern void elevator_exit(elevator_t *);
 extern int elv_rq_merge_ok(struct request *, struct bio *);
 
 /*
  * Helper functions.
 */
-extern struct request *elv_rb_former_request(request_queue_t *, struct request *);
-extern struct request *elv_rb_latter_request(request_queue_t *, struct request *);
+extern struct request *elv_rb_former_request(struct request_queue *, struct request *);
+extern struct request *elv_rb_latter_request(struct request_queue *, struct request *);
 
 /*
  * rb support functions.
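The same conversion runs through every elevator hook above. As a hedged sketch of how an I/O scheduler lines up with the new prototypes — modeled loosely on the noop scheduler, with hypothetical sketch_* names — a minimal FIFO elevator looks roughly like this:

    struct sketch_data {
            struct list_head queue;
    };

    static void sketch_add_request(struct request_queue *q, struct request *rq)
    {
            struct sketch_data *sd = q->elevator->elevator_data;

            list_add_tail(&rq->queuelist, &sd->queue);
    }

    static int sketch_dispatch(struct request_queue *q, int force)
    {
            struct sketch_data *sd = q->elevator->elevator_data;

            if (!list_empty(&sd->queue)) {
                    struct request *rq = list_entry(sd->queue.next,
                                                    struct request, queuelist);
                    list_del_init(&rq->queuelist);
                    elv_dispatch_sort(q, rq);
                    return 1;
            }
            return 0;
    }

    static int sketch_queue_empty(struct request_queue *q)
    {
            struct sketch_data *sd = q->elevator->elevator_data;

            return list_empty(&sd->queue);
    }

    static void *sketch_init_queue(struct request_queue *q)
    {
            struct sketch_data *sd = kmalloc(sizeof(*sd), GFP_KERNEL);

            if (sd)
                    INIT_LIST_HEAD(&sd->queue);
            return sd;
    }

    static void sketch_exit_queue(elevator_t *e)
    {
            kfree(e->elevator_data);
    }

    static struct elevator_type elevator_sketch = {
            .ops = {
                    .elevator_add_req_fn     = sketch_add_request,
                    .elevator_dispatch_fn    = sketch_dispatch,
                    .elevator_queue_empty_fn = sketch_queue_empty,
                    .elevator_init_fn        = sketch_init_queue,
                    .elevator_exit_fn        = sketch_exit_queue,
            },
            .elevator_name  = "sketch",
            .elevator_owner = THIS_MODULE,
    };

Such a table would be registered with elv_register(&elevator_sketch) from module init; note that every queue-level hook now receives struct request_queue * directly.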
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 5f5daad8bc54..d71d0121b7f9 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -555,7 +555,7 @@ typedef struct ide_drive_s {
 	char		name[4];	/* drive name, such as "hda" */
 	char		driver_req[10];	/* requests specific driver */
 
-	request_queue_t		*queue;	/* request queue */
+	struct request_queue	*queue;	/* request queue */
 
 	struct request		*rq;	/* current request */
 	struct ide_drive_s	*next;	/* circular list of hwgroup drives */
@@ -1206,7 +1206,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
 extern int ide_spin_wait_hwgroup(ide_drive_t *);
 extern void ide_timer_expiry(unsigned long);
 extern irqreturn_t ide_intr(int irq, void *dev_id);
-extern void do_ide_request(request_queue_t *);
+extern void do_ide_request(struct request_queue *);
 
 void ide_init_disk(struct gendisk *, ide_drive_t *);
 
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index 28ac632b42dd..dcb729244f47 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -227,7 +227,7 @@ struct mddev_s
 	unsigned int			safemode_delay;
 	struct timer_list		safemode_timer;
 	atomic_t			writes_pending;
-	request_queue_t			*queue;	/* for plugging ... */
+	struct request_queue		*queue;	/* for plugging ... */
 
 	atomic_t			write_behind; /* outstanding async IO */
 	unsigned int			max_write_behind; /* 0 = sync */
@@ -265,7 +265,7 @@ struct mdk_personality
 	int level;
 	struct list_head list;
 	struct module *owner;
-	int (*make_request)(request_queue_t *q, struct bio *bio);
+	int (*make_request)(struct request_queue *q, struct bio *bio);
 	int (*run)(mddev_t *mddev);
 	int (*stop)(mddev_t *mddev);
 	void (*status)(struct seq_file *seq, mddev_t *mddev);
diff --git a/include/scsi/sd.h b/include/scsi/sd.h
index 5261488e1108..78583fee0ab2 100644
--- a/include/scsi/sd.h
+++ b/include/scsi/sd.h
@@ -57,7 +57,7 @@ static int sd_resume(struct device *dev);
 static void sd_rescan(struct device *);
 static int sd_init_command(struct scsi_cmnd *);
 static int sd_issue_flush(struct device *, sector_t *);
-static void sd_prepare_flush(request_queue_t *, struct request *);
+static void sd_prepare_flush(struct request_queue *, struct request *);
 static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
 static void scsi_disk_release(struct class_device *cdev);
 static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
diff --git a/mm/bounce.c b/mm/bounce.c
index ad401fc57440..179fe38a2416 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -190,7 +190,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
 	return 0;
 }
 
-static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
+static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 			       mempool_t *pool)
 {
 	struct page *page;
@@ -275,7 +275,7 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
 	*bio_orig = bio;
 }
 
-void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 {
 	mempool_t *pool;