Diffstat (limited to 'include/linux/blkdev.h')
 -rw-r--r--  include/linux/blkdev.h  |  51
 1 file changed, 41 insertions(+), 10 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0d84981ee03f..3cd426e971db 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -90,15 +90,15 @@ enum rq_cmd_type_bits {
#define BLK_MAX_CDB 16
/*
- * try to put the fields that are referenced together in the same cacheline.
- * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
- * as well!
+ * Try to put the fields that are referenced together in the same cacheline.
+ *
+ * If you modify this structure, make sure to update blk_rq_init() and
+ * especially blk_mq_rq_ctx_init() to take care of the added fields.
*/
struct request {
struct list_head queuelist;
union {
struct call_single_data csd;
- struct work_struct mq_flush_work;
unsigned long fifo_time;
};
@@ -178,7 +178,6 @@ struct request {
unsigned short ioprio;
void *special; /* opaque pointer available for LLD use */
- char *buffer; /* kaddr of the current segment if available */
int tag;
int errors;
@@ -336,7 +335,7 @@ struct request_queue {
unsigned int *mq_map;
/* sw queues */
- struct blk_mq_ctx *queue_ctx;
+ struct blk_mq_ctx __percpu *queue_ctx;
unsigned int nr_queues;
/* hw dispatch queues */
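
With queue_ctx now a true __percpu allocation, the software queues are reached
through the per-CPU accessors. A minimal sketch of such a lookup, assuming the
usual helpers from <linux/percpu.h> (the function name is illustrative, not
part of this diff):

    /* Sketch: return the software queue that belongs to 'cpu'. */
    static struct blk_mq_ctx *example_get_sw_ctx(struct request_queue *q,
                                                 unsigned int cpu)
    {
            return per_cpu_ptr(q->queue_ctx, cpu);
    }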
@@ -463,6 +462,10 @@ struct request_queue {
struct request *flush_rq;
spinlock_t mq_flush_lock;
+ struct list_head requeue_list;
+ spinlock_t requeue_lock;
+ struct work_struct requeue_work;
+
struct mutex sysfs_lock;
int bypass_depth;
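
The requeue_list/requeue_lock/requeue_work trio backs the blk-mq requeue
machinery. A hedged sketch of how a blk-mq driver might hand a request back
and kick the list, using the blk_mq helpers from this series (the wrapper
function is hypothetical):

    /* Sketch: park rq on q->requeue_list and schedule q->requeue_work. */
    static void example_requeue(struct request *rq)
    {
            struct request_queue *q = rq->q;

            blk_mq_requeue_request(rq);
            blk_mq_kick_requeue_list(q);
    }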
@@ -481,6 +484,9 @@ struct request_queue {
wait_queue_head_t mq_freeze_wq;
struct percpu_counter mq_usage_counter;
struct list_head all_q_node;
+
+ struct blk_mq_tag_set *tag_set;
+ struct list_head tag_set_list;
};
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
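
tag_set and tag_set_list allow several request queues to share one
struct blk_mq_tag_set. A minimal sketch of that registration flow, assuming a
driver-private blk_mq_ops and placeholder sizing values (only
blk_mq_alloc_tag_set() and blk_mq_init_queue() come from the blk-mq API):

    static struct blk_mq_ops example_mq_ops;        /* driver callbacks */
    static struct blk_mq_tag_set example_set;       /* shared by all queues */

    static struct request_queue *example_add_queue(void)
    {
            /* Allocate the shared set once; later queues reuse it. */
            if (!example_set.ops) {
                    example_set.ops = &example_mq_ops;
                    example_set.nr_hw_queues = 1;
                    example_set.queue_depth = 64;
                    example_set.numa_node = NUMA_NO_NODE;
                    if (blk_mq_alloc_tag_set(&example_set))
                            return NULL;
            }

            /* Every queue built from the set is linked onto tag_set_list. */
            return blk_mq_init_queue(&example_set);
    }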
@@ -504,6 +510,7 @@ struct request_queue {
#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
+#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
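
QUEUE_FLAG_NO_SG_MERGE lets a driver opt out of scatter-gather segment merging
when requests are mapped. A hedged sketch of setting it during queue setup;
queue_flag_set_unlocked() is the existing unlocked flag helper, the callsite
itself is hypothetical:

    /* Sketch: this queue's hardware gains nothing from SG merging,
     * so skip the merge pass when building scatterlists. */
    static void example_setup_queue_flags(struct request_queue *q)
    {
            queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
    }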
@@ -613,6 +620,15 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0)
+/*
+ * Driver can handle struct request, if it either has an old style
+ * request_fn defined, or is blk-mq based.
+ */
+static inline bool queue_is_rq_based(struct request_queue *q)
+{
+ return q->request_fn || q->mq_ops;
+}
+
static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
return q->limits.cluster;
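
queue_is_rq_based() gives stacking drivers a single test for "this queue
accepts struct request", whether via a legacy request_fn or via blk-mq. A
small usage sketch; the caller and message are illustrative only:

    /* Sketch: refuse to stack a request-based driver on top of a
     * bio-based queue. */
    static int example_check_underlying(struct request_queue *q)
    {
            if (!queue_is_rq_based(q)) {
                    pr_err("underlying device is bio-based, cannot stack\n");
                    return -EINVAL;
            }
            return 0;
    }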
@@ -937,6 +953,7 @@ extern struct request *blk_fetch_request(struct request_queue *q);
*/
extern bool blk_update_request(struct request *rq, int error,
unsigned int nr_bytes);
+extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
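
blk_finish_request() is now visible alongside blk_update_request(), so
completion can be split into "account the transferred bytes" and "finish the
request". A hedged sketch of that split; it assumes the caller holds the queue
lock, as the legacy completion path does:

    /* Sketch: complete 'bytes' of rq; if nothing is left, finish it.
     * blk_update_request() returns true while the request still has
     * outstanding data. Queue lock assumed held. */
    static void example_complete(struct request *rq, int error,
                                 unsigned int bytes)
    {
            if (!blk_update_request(rq, error, bytes))
                    blk_finish_request(rq, error);
    }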
@@ -1053,7 +1070,6 @@ static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
* schedule() where blk_schedule_flush_plug() is called.
*/
struct blk_plug {
- unsigned long magic; /* detect uninitialized use-cases */
struct list_head list; /* requests */
struct list_head mq_list; /* blk-mq requests */
struct list_head cb_list; /* md requires an unplug callback */
@@ -1102,7 +1118,8 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
/*
* tag stuff
*/
-#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED)
+#define blk_rq_tagged(rq) \
+ ((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED))
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
@@ -1370,8 +1387,9 @@ static inline void put_dev_sector(Sector p)
}
struct work_struct;
-int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
+int kblockd_schedule_work(struct work_struct *work);
+int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
+int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP
/*
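
kblockd_schedule_work() and the delayed variants no longer take a
request_queue argument; the work item alone identifies what to run on kblockd.
A before/after sketch with hypothetical work items:

    static struct work_struct example_work;      /* INIT_WORK()ed elsewhere */
    static struct delayed_work example_dwork;    /* INIT_DELAYED_WORK()ed */

    static void example_kick(int cpu)
    {
            /* Old:  kblockd_schedule_work(q, &example_work); */
            kblockd_schedule_work(&example_work);

            /* Delayed variants, optionally pinned to a CPU. */
            kblockd_schedule_delayed_work(&example_dwork, HZ);
            kblockd_schedule_delayed_work_on(cpu, &example_dwork, HZ);
    }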
@@ -1570,6 +1588,7 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g)
struct block_device_operations {
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
+ int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*direct_access) (struct block_device *, sector_t,
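
The new ->rw_page() method lets simple, memory-backed drivers serve a single
page synchronously without building a bio. A hedged driver-side sketch;
example_do_page() stands in for the device's own copy routine and is not part
of this interface:

    /* Sketch: move one page of data, then signal completion.
     * 'rw' is READ or WRITE; page_endio() reports the outcome. */
    static int example_rw_page(struct block_device *bdev, sector_t sector,
                               struct page *page, int rw)
    {
            int err = example_do_page(bdev->bd_disk->private_data,
                                      page, sector, rw);

            page_endio(page, rw & WRITE, err);
            return err;
    }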
@@ -1588,7 +1607,13 @@ struct block_device_operations {
extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
unsigned long);
+extern int bdev_read_page(struct block_device *, sector_t, struct page *);
+extern int bdev_write_page(struct block_device *, sector_t, struct page *,
+ struct writeback_control *);
#else /* CONFIG_BLOCK */
+
+struct block_device;
+
/*
* stubs for when the block layer is configured out
*/
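
bdev_read_page()/bdev_write_page() are the caller-facing wrappers for
->rw_page(); they fail cleanly when a driver does not provide the hook, so
callers keep their bio-based path. A hedged sketch of a read-side caller; the
fallback function is a placeholder:

    /* Sketch: try the synchronous page interface first, fall back to
     * a normal bio submission if the driver lacks ->rw_page. */
    static int example_read_one_page(struct block_device *bdev,
                                     sector_t sector, struct page *page)
    {
            int err = bdev_read_page(bdev, sector, page);

            if (err)
                    err = example_submit_bio_fallback(bdev, sector, page);
            return err;
    }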
@@ -1624,6 +1649,12 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
return false;
}
+static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
+ sector_t *error_sector)
+{
+ return 0;
+}
+
#endif /* CONFIG_BLOCK */
#endif