path: root/block/blk.h
author     Ming Lei <ming.lei@canonical.com>   2014-09-25 17:23:43 +0200
committer  Jens Axboe <axboe@fb.com>           2014-09-25 23:22:40 +0200
commit     7c94e1c157a227837b04f02f5edeff8301410ba2 (patch)
tree       3b592095b0204ebad61dd22b77aa1e72595dbc3a /block/blk.h
parent     block: avoid to use q->flush_rq directly (diff)
block: introduce blk_flush_queue to drive flush machinery
This patch introduces 'struct blk_flush_queue' and puts all flush machinery
related fields into this structure, so that:

	- flush implementation details aren't exposed to drivers
	- it is easy to convert to per-dispatch-queue flush machinery

This patch is basically a mechanical replacement.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
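To illustrate the shape of the change outside the kernel tree, here is a minimal, self-contained C99 userspace sketch of the same encapsulation pattern. It is an assumption-laden reduction, not kernel code: struct request and struct request_queue are stubbed down to the bare minimum, while blk_flush_queue and blk_get_flush_queue mirror the diff below. The point it demonstrates is the one the commit message makes: callers stop reaching into scattered flush_* fields of the queue and go through one accessor, which is what later makes a per-dispatch-queue conversion a local change.

#include <stdio.h>
#include <stdlib.h>

/* Stubbed stand-ins for the kernel types; the real definitions
 * carry far more fields than this sketch needs. */
struct request {
	int tag;
};

/* All flush machinery state grouped into one structure, as in the patch. */
struct blk_flush_queue {
	unsigned int flush_queue_delayed:1;
	unsigned int flush_pending_idx:1;
	unsigned int flush_running_idx:1;
	struct request *flush_rq;
};

/* The queue now owns a pointer to its flush machinery ... */
struct request_queue {
	struct blk_flush_queue *fq;
};

/* ... and callers obtain it through one accessor instead of touching
 * individual flush_* fields of request_queue directly. */
static inline struct blk_flush_queue *blk_get_flush_queue(struct request_queue *q)
{
	return q->fq;
}

int main(void)
{
	struct request_queue q = { .fq = calloc(1, sizeof(struct blk_flush_queue)) };
	struct blk_flush_queue *fq = blk_get_flush_queue(&q);

	/* Pre-patch code would have written q->flush_queue_delayed = 1;
	 * the same state now lives behind the accessor: */
	fq->flush_queue_delayed = 1;
	printf("pending_idx=%u running_idx=%u delayed=%u\n",
	       fq->flush_pending_idx, fq->flush_running_idx,
	       fq->flush_queue_delayed);

	free(q.fq);
	return 0;
}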
Diffstat (limited to 'block/blk.h')
-rw-r--r--  block/blk.h  22
1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/block/blk.h b/block/blk.h
index c6fa3d4c6a89..833c4ac6c4eb 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -12,11 +12,28 @@
 /* Max future timer expiry for timeouts */
 #define BLK_MAX_TIMEOUT		(5 * HZ)
 
+struct blk_flush_queue {
+	unsigned int		flush_queue_delayed:1;
+	unsigned int		flush_pending_idx:1;
+	unsigned int		flush_running_idx:1;
+	unsigned long		flush_pending_since;
+	struct list_head	flush_queue[2];
+	struct list_head	flush_data_in_flight;
+	struct request		*flush_rq;
+	spinlock_t		mq_flush_lock;
+};
+
 extern struct kmem_cache *blk_requestq_cachep;
 extern struct kmem_cache *request_cachep;
 extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;
 
+static inline struct blk_flush_queue *blk_get_flush_queue(
+		struct request_queue *q)
+{
+	return q->fq;
+}
+
 static inline void __blk_get_queue(struct request_queue *q)
 {
 	kobject_get(&q->kobj);
@@ -89,6 +106,7 @@ void blk_insert_flush(struct request *rq);
 static inline struct request *__elv_next_request(struct request_queue *q)
 {
 	struct request *rq;
+	struct blk_flush_queue *fq = blk_get_flush_queue(q);
 
 	while (1) {
 		if (!list_empty(&q->queue_head)) {
@@ -111,9 +129,9 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 		 * should be restarted later. Please see flush_end_io() for
 		 * details.
 		 */
-		if (q->flush_pending_idx != q->flush_running_idx &&
+		if (fq->flush_pending_idx != fq->flush_running_idx &&
 		    !queue_flush_queueable(q)) {
-			q->flush_queue_delayed = 1;
+			fq->flush_queue_delayed = 1;
 			return NULL;
 		}
 		if (unlikely(blk_queue_bypass(q)) ||