Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 75 ++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 64 insertions(+), 11 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0d84981ee03f..8699bcf5f099 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -90,15 +90,15 @@ enum rq_cmd_type_bits {
#define BLK_MAX_CDB 16
/*
- * try to put the fields that are referenced together in the same cacheline.
- * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
- * as well!
+ * Try to put the fields that are referenced together in the same cacheline.
+ *
+ * If you modify this structure, make sure to update blk_rq_init() and
+ * especially blk_mq_rq_ctx_init() to take care of the added fields.
*/
struct request {
struct list_head queuelist;
union {
struct call_single_data csd;
- struct work_struct mq_flush_work;
unsigned long fifo_time;
};
@@ -178,7 +178,6 @@ struct request {
unsigned short ioprio;
void *special; /* opaque pointer available for LLD use */
- char *buffer; /* kaddr of the current segment if available */
int tag;
int errors;
@@ -281,6 +280,7 @@ struct queue_limits {
unsigned long seg_boundary_mask;
unsigned int max_hw_sectors;
+ unsigned int chunk_sectors;
unsigned int max_sectors;
unsigned int max_segment_size;
unsigned int physical_block_size;
@@ -336,7 +336,7 @@ struct request_queue {
unsigned int *mq_map;
/* sw queues */
- struct blk_mq_ctx *queue_ctx;
+ struct blk_mq_ctx __percpu *queue_ctx;
unsigned int nr_queues;
/* hw dispatch queues */
@@ -463,6 +463,10 @@ struct request_queue {
struct request *flush_rq;
spinlock_t mq_flush_lock;
+ struct list_head requeue_list;
+ spinlock_t requeue_lock;
+ struct work_struct requeue_work;
+
struct mutex sysfs_lock;
int bypass_depth;
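
The three new fields back the blk-mq requeue machinery: parked requests sit on requeue_list under requeue_lock, and requeue_work redispatches them from kblockd context. A minimal sketch of a completion path using it, assuming a hypothetical mydev_complete() and the blk_mq_requeue_request()/blk_mq_kick_requeue_list() helpers added in the same series:

/* Hypothetical completion handler: park transient failures for redispatch. */
static void mydev_complete(struct request *rq, int error)
{
	if (error == -EAGAIN) {
		blk_mq_requeue_request(rq);      /* put rq on q->requeue_list */
		blk_mq_kick_requeue_list(rq->q); /* schedule q->requeue_work */
		return;
	}
	blk_mq_end_io(rq, error);
}
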
@@ -481,6 +485,9 @@ struct request_queue {
wait_queue_head_t mq_freeze_wq;
struct percpu_counter mq_usage_counter;
struct list_head all_q_node;
+
+ struct blk_mq_tag_set *tag_set;
+ struct list_head tag_set_list;
};
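
tag_set and tag_set_list tie a queue back to the blk_mq_tag_set it was created from, so one set can drive several queues. A hedged setup sketch, where mydev_mq_ops and struct mydev_cmd are hypothetical driver pieces:

static struct blk_mq_tag_set set;
struct request_queue *q;

set.ops          = &mydev_mq_ops;            /* hypothetical driver ops */
set.nr_hw_queues = 1;
set.queue_depth  = 64;
set.numa_node    = NUMA_NO_NODE;
set.cmd_size     = sizeof(struct mydev_cmd); /* per-request driver data */
set.flags        = BLK_MQ_F_SHOULD_MERGE;

if (blk_mq_alloc_tag_set(&set))
	return -ENOMEM;
q = blk_mq_init_queue(&set);                 /* links q onto the set's tag_set_list */
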
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
@@ -504,6 +511,8 @@ struct request_queue {
#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
+#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments */
+#define QUEUE_FLAG_SG_GAPS 22 /* queue doesn't support SG gaps */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
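
Both new flags are opt-in hints: NO_SG_MERGE lets the core skip the scatterlist merge pass, while SG_GAPS marks devices that cannot cope with gaps between SG elements. A driver would set them at init time with the existing helpers from earlier in this header, e.g.:

	queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
	queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, q);
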
@@ -613,6 +622,15 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0)
+/*
+ * Driver can handle struct request, if it either has an old style
+ * request_fn defined, or is blk-mq based.
+ */
+static inline bool queue_is_rq_based(struct request_queue *q)
+{
+ return q->request_fn || q->mq_ops;
+}
+
static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
return q->limits.cluster;
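
queue_is_rq_based() gives stacking drivers one test that covers both the legacy request_fn path and blk-mq. A hedged sketch of the kind of check a device-mapper-style target might make before passing struct request down to a lower device:

	struct request_queue *bottom = bdev_get_queue(bdev);

	if (!queue_is_rq_based(bottom))
		return -EINVAL;	/* lower device only accepts bios */
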
@@ -779,6 +797,7 @@ extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
gfp_t);
+extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
unsigned int len);
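
blk_rq_set_block_pc() centralizes the setup that SCSI passthrough callers used to open-code after allocating a request. The calling convention the conversion expects looks roughly like this (error handling elided beyond the NULL check):

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	blk_rq_set_block_pc(rq);	/* was: rq->cmd_type = REQ_TYPE_BLOCK_PC; ... */
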
@@ -894,6 +913,20 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
return q->limits.max_sectors;
}
+/*
+ * Return maximum size of a request at given offset. Only valid for
+ * file system requests.
+ */
+static inline unsigned int blk_max_size_offset(struct request_queue *q,
+ sector_t offset)
+{
+ if (!q->limits.chunk_sectors)
+ return q->limits.max_sectors;
+
+ return q->limits.chunk_sectors -
+ (offset & (q->limits.chunk_sectors - 1));
+}
+
static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
{
struct request_queue *q = rq->q;
@@ -901,7 +934,11 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
return q->limits.max_hw_sectors;
- return blk_queue_get_max_sectors(q, rq->cmd_flags);
+ if (!q->limits.chunk_sectors)
+ return blk_queue_get_max_sectors(q, rq->cmd_flags);
+
+ return min(blk_max_size_offset(q, blk_rq_pos(rq)),
+ blk_queue_get_max_sectors(q, rq->cmd_flags));
}
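
Because the boundary math uses a mask, chunk_sectors is assumed to be a power of two. A worked example for chunk_sectors = 128 (64 KiB) and a file system request starting at sector 200:

	200 & 127 = 72 sectors already used in the current chunk
	128 - 72  = 56 sectors left, so blk_max_size_offset() returns 56

blk_rq_get_max_sectors() then takes the min() of that and the usual per-command limit, so file system requests never straddle a chunk (e.g. NVMe stripe) boundary.
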
static inline unsigned int blk_rq_count_bios(struct request *rq)
@@ -937,6 +974,7 @@ extern struct request *blk_fetch_request(struct request_queue *q);
*/
extern bool blk_update_request(struct request *rq, int error,
unsigned int nr_bytes);
+extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
@@ -966,6 +1004,7 @@ extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
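
A driver advertises its boundary through the new limit helper; a hedged example for a device striped at 64 KiB:

	blk_queue_chunk_sectors(q, 128);	/* 128 sectors = 64 KiB; power of two */
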
@@ -1053,7 +1092,6 @@ static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
* schedule() where blk_schedule_flush_plug() is called.
*/
struct blk_plug {
- unsigned long magic; /* detect uninitialized use-cases */
struct list_head list; /* requests */
struct list_head mq_list; /* blk-mq requests */
struct list_head cb_list; /* md requires an unplug callback */
@@ -1102,7 +1140,8 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
/*
* tag stuff
*/
-#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED)
+#define blk_rq_tagged(rq) \
+ ((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED))
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
@@ -1370,8 +1409,9 @@ static inline void put_dev_sector(Sector p)
}
struct work_struct;
-int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
+int kblockd_schedule_work(struct work_struct *work);
+int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
+int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
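
The request_queue argument was unused, so the kblockd helpers now mirror the plain workqueue API. A hedged usage sketch with a hypothetical mydev_work_fn():

static void mydev_work_fn(struct work_struct *work)
{
	/* runs on the kblockd workqueue */
}

static DECLARE_WORK(mydev_work, mydev_work_fn);
static DECLARE_DELAYED_WORK(mydev_dwork, mydev_work_fn);

kblockd_schedule_work(&mydev_work);
kblockd_schedule_delayed_work(&mydev_dwork, msecs_to_jiffies(10));
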
#ifdef CONFIG_BLK_CGROUP
/*
@@ -1570,6 +1610,7 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g)
struct block_device_operations {
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
+ int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*direct_access) (struct block_device *, sector_t,
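
->rw_page lets a driver move a single page synchronously without building a bio, which suits RAM-backed devices. A hedged sketch assuming a hypothetical mydev_copy_page() helper; completion is reported through page_endio(), introduced alongside this hook:

static int mydev_rw_page(struct block_device *bdev, sector_t sector,
			 struct page *page, int rw)
{
	struct mydev *dev = bdev->bd_disk->private_data;	/* hypothetical */
	int err = mydev_copy_page(dev, page, sector, rw);	/* hypothetical */

	page_endio(page, rw & WRITE, err);	/* unlock / end writeback */
	return err;
}
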
@@ -1588,7 +1629,13 @@ struct block_device_operations {
extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
unsigned long);
+extern int bdev_read_page(struct block_device *, sector_t, struct page *);
+extern int bdev_write_page(struct block_device *, sector_t, struct page *,
+ struct writeback_control *);
#else /* CONFIG_BLOCK */
+
+struct block_device;
+
/*
* stubs for when the block layer is configured out
*/
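
The bdev_read_page()/bdev_write_page() wrappers added above return -EOPNOTSUPP when the driver provides no ->rw_page, so every caller keeps a bio-based fallback. A hedged sketch of the pattern, with mydev_submit_read_bio() as a hypothetical fallback:

	err = bdev_read_page(bdev, sector, page);
	if (err == -EOPNOTSUPP)
		err = mydev_submit_read_bio(bdev, sector, page);	/* hypothetical */
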
@@ -1624,6 +1671,12 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
return false;
}
+static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
+ sector_t *error_sector)
+{
+ return 0;
+}
+
#endif /* CONFIG_BLOCK */
#endif