| author | Breno Leitao <leitao@debian.org> | 2023-01-18 16:56:30 +0100 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2023-01-29 23:17:41 +0100 |
| commit | c1755c25a7190494b45861284b4a30bd9cd813ff (patch) | |
| tree | 42cb114ed4fb6907e56be37efee960c53f41985d /io_uring | |
| parent | io_uring: handle TIF_NOTIFY_RESUME when checking for task_work (diff) | |
io_uring: Enable KASAN for request cache
Every io_uring request is represented by struct io_kiocb, which io_uring
caches locally (outside SLAB/SLUB) on the list submit_state.free_list.
This patch enables KASAN for that free list.

The backing cache is created with KMEM_CACHE, but the objects on this
list are afterwards managed by io_uring itself. This patch poisons
objects that are not in use (i.e., those sitting on the free list) and
unpoisons them when they are allocated/removed from the list.

Touching these poisoned objects while they are on the free list will
cause a KASAN warning.
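To make the mechanism concrete, here is a minimal sketch of the
poison-on-free / unpoison-on-alloc pattern described above, assuming a
cache-backed object kept on a privately managed LIFO free list. All
names here (my_obj, my_cachep, my_freelist and the two helpers) are
hypothetical stand-ins, not io_uring code:

```c
#include <linux/kasan.h>
#include <linux/slab.h>

/* Hypothetical cache-backed object kept on a private LIFO free list. */
struct my_obj {
	struct my_obj *next;		/* free-list linkage */
};

static struct kmem_cache *my_cachep;	/* assume KMEM_CACHE(my_obj, 0) at init */
static struct my_obj *my_freelist;	/* locally managed free list */

static void my_obj_free(struct my_obj *obj)
{
	/* Push touches only the new node and the list head pointer. */
	obj->next = my_freelist;
	my_freelist = obj;
	/* The free list now owns the object: mark it off limits. */
	kasan_poison_object_data(my_cachep, obj);
}

static struct my_obj *my_obj_alloc(void)
{
	struct my_obj *obj = my_freelist;

	if (!obj)
		return kmem_cache_alloc(my_cachep, GFP_KERNEL);

	/* Unpoison before reading obj->next for the pop. */
	kasan_unpoison_object_data(my_cachep, obj);
	my_freelist = obj->next;
	return obj;
}
```

Note the ordering on the allocation side: the object has to be
unpoisoned before its linkage field is read. That is why io_alloc_req()
in the patch below computes the request pointer with container_of()
(which does not dereference it) and unpoisons it before calling
wq_stack_extract().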
Suggested-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Breno Leitao <leitao@debian.org>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring')
| -rw-r--r-- | io_uring/io_uring.c | 3 |
| -rw-r--r-- | io_uring/io_uring.h | 11 |
2 files changed, 10 insertions, 4 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index c84dce6944db..407834255a59 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -151,7 +151,7 @@ static void io_move_task_work_from_local(struct io_ring_ctx *ctx);
 static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
 static __cold void io_fallback_tw(struct io_uring_task *tctx);

-static struct kmem_cache *req_cachep;
+struct kmem_cache *req_cachep;

 struct sock *io_uring_get_socket(struct file *file)
 {
@@ -230,6 +230,7 @@ static inline void req_fail_link_node(struct io_kiocb *req, int res)
 static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
 {
 	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
+	kasan_poison_object_data(req_cachep, req);
 }

 static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index d58cfe062da9..9270156288aa 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -4,6 +4,7 @@
 #include <linux/errno.h>
 #include <linux/lockdep.h>
 #include <linux/resume_user_mode.h>
+#include <linux/kasan.h>
 #include <linux/io_uring_types.h>
 #include <uapi/linux/eventpoll.h>
 #include "io-wq.h"
@@ -347,12 +348,16 @@ static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
 	return true;
 }

+extern struct kmem_cache *req_cachep;
+
 static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
 {
-	struct io_wq_work_node *node;
+	struct io_kiocb *req;

-	node = wq_stack_extract(&ctx->submit_state.free_list);
-	return container_of(node, struct io_kiocb, comp_list);
+	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
+	kasan_unpoison_object_data(req_cachep, req);
+	wq_stack_extract(&ctx->submit_state.free_list);
+	return req;
 }

 static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
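As an illustration of what the new poisoning catches, here is a hedged,
hypothetical misuse, imagined as sitting inside io_uring.c where
io_req_add_to_cache() is visible; it is not code from the patch. Once a
request has been returned to the cache, any further access lands on
poisoned memory and KASAN reports it instead of silently corrupting a
cached request:

```c
/*
 * Hypothetical buggy sequence, for illustration only: the request is
 * pushed back onto the cache and then touched again afterwards.
 */
static void buggy_recycle(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	io_req_add_to_cache(req, ctx);	/* req is poisoned from here on */
	req->flags = 0;			/* use after return to cache: KASAN warns */
}
```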