author	Ming Lei <tom.leiming@gmail.com>	2014-05-31 18:43:36 +0200
committer	Jens Axboe <axboe@fb.com>	2014-06-04 05:04:38 +0200
commit	1aecfe4887713838c79bc52f774609a57db4f988 (patch)
tree	f2b2baf54092829ab1fc3d97087ac6b45b89cc04 /block
parent	Merge branch 'x86-uv-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/... (diff)
blk-mq: move blk_mq_get_ctx/blk_mq_put_ctx to mq private header
The blk-mq tag code needs these helpers.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c	22 ----------------------
-rw-r--r--	block/blk-mq.h	22 ++++++++++++++++++++++
2 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0f5879c42dcd..b9230c522c6b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -33,28 +33,6 @@ static LIST_HEAD(all_q_list);
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
-static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
- unsigned int cpu)
-{
- return per_cpu_ptr(q->queue_ctx, cpu);
-}
-
-/*
- * This assumes per-cpu software queueing queues. They could be per-node
- * as well, for instance. For now this is hardcoded as-is. Note that we don't
- * care about preemption, since we know the ctx's are persistent. This does
- * mean that we can't rely on ctx always matching the currently running CPU.
- */
-static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
-{
- return __blk_mq_get_ctx(q, get_cpu());
-}
-
-static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
-{
- put_cpu();
-}
-
/*
* Check if any of the ctx's have pending work in this hardware queue
*/
diff --git a/block/blk-mq.h b/block/blk-mq.h
index de7b3bbd5bd6..57a7968e47b3 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -69,4 +69,26 @@ struct blk_align_bitmap {
unsigned long depth;
} ____cacheline_aligned_in_smp;
+static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
+ unsigned int cpu)
+{
+ return per_cpu_ptr(q->queue_ctx, cpu);
+}
+
+/*
+ * This assumes per-cpu software queueing queues. They could be per-node
+ * as well, for instance. For now this is hardcoded as-is. Note that we don't
+ * care about preemption, since we know the ctx's are persistent. This does
+ * mean that we can't rely on ctx always matching the currently running CPU.
+ */
+static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
+{
+ return __blk_mq_get_ctx(q, get_cpu());
+}
+
+static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
+{
+ put_cpu();
+}
+
#endif
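
For context on how these now-inline helpers are used: blk_mq_get_ctx() calls get_cpu(), which disables preemption while the per-cpu ctx is in use, and blk_mq_put_ctx() calls put_cpu() to release it. The sketch below is illustrative only and not part of this patch; the function name example_tag_user() is hypothetical, standing in for a caller such as the blk-mq tag code that this move makes the helpers visible to.

#include "blk-mq.h"

/*
 * Illustrative sketch only, not part of this patch.  A caller (e.g. in
 * block/blk-mq-tag.c, which this move is for) is expected to pair
 * blk_mq_get_ctx() with blk_mq_put_ctx(): the former calls get_cpu()
 * and so disables preemption, keeping the returned per-cpu ctx stable
 * until the latter calls put_cpu().
 */
static void example_tag_user(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;

	ctx = blk_mq_get_ctx(q);	/* get_cpu(): preemption disabled */

	/* ... operate on the per-cpu software queue context here ... */

	blk_mq_put_ctx(ctx);		/* put_cpu(): preemption re-enabled */
}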