path: root/io_uring/kbuf.c
author    Hao Xu <howeyxu@tencent.com>    2022-06-22 07:55:51 +0200
committer Jens Axboe <axboe@kernel.dk>    2022-07-25 02:39:15 +0200
commit    024b8fde3320ea34d7a5a3fc9dbc47ec736cd8eb (patch)
tree      c0fa4f3f2d38bc291267e0d1b7f4ed180f06855b /io_uring/kbuf.c
parent    io_uring: trace task_work_run (diff)
io_uring: kbuf: kill __io_kbuf_recycle()
__io_kbuf_recycle() is only called in io_kbuf_recycle(). Kill it and tweak
the code so that the legacy pbuf and ring pbuf paths become clearer.

Signed-off-by: Hao Xu <howeyxu@tencent.com>
Link: https://lore.kernel.org/r/20220622055551.642370-1-hao.xu@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
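Not shown here (the diffstat below is limited to io_uring/kbuf.c) is the caller-side helper that now has to pick between the two paths. As a rough sketch only, assuming the dispatch keys off the existing REQ_F_BUFFER_SELECTED and REQ_F_BUFFER_RING flags, the io_kbuf_recycle() wrapper on the kbuf.h side would look something like:

/* Sketch only -- the real helper lives in io_uring/kbuf.h, outside this diff. */
static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	/* classic provided buffers: return the buffer to its list (takes the lock) */
	if (req->flags & REQ_F_BUFFER_SELECTED)
		io_kbuf_recycle_legacy(req, issue_flags);
	/* ring-mapped provided buffers: only head/flag bookkeeping, no locking */
	if (req->flags & REQ_F_BUFFER_RING)
		io_kbuf_recycle_ring(req);
}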
Diffstat (limited to 'io_uring/kbuf.c')
-rw-r--r--    io_uring/kbuf.c    71
1 file changed, 42 insertions(+), 29 deletions(-)
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 62de0dda24bf..8bf47e49ea5b 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -37,36 +37,30 @@ static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
 	return xa_load(&ctx->io_bl_xa, bgid);
 }
 
-void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
+static int io_buffer_add_list(struct io_ring_ctx *ctx,
+			      struct io_buffer_list *bl, unsigned int bgid)
+{
+	bl->bgid = bgid;
+	if (bgid < BGID_ARRAY)
+		return 0;
+
+	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
+}
+
+void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
 	struct io_buffer *buf;
 
 	/*
-	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
-	 * the flag and hence ensure that bl->head doesn't get incremented.
-	 * If the tail has already been incremented, hang on to it.
+	 * For legacy provided buffer mode, don't recycle if we already did
+	 * IO to this buffer. For ring-mapped provided buffer mode, we should
+	 * increment ring->head to explicitly monopolize the buffer to avoid
+	 * multiple use.
 	 */
-	if (req->flags & REQ_F_BUFFER_RING) {
-		if (req->buf_list) {
-			if (req->flags & REQ_F_PARTIAL_IO) {
-				/*
-				 * If we end up here, then the io_uring_lock has
-				 * been kept held since we retrieved the buffer.
-				 * For the io-wq case, we already cleared
-				 * req->buf_list when the buffer was retrieved,
-				 * hence it cannot be set here for that case.
-				 */
-				req->buf_list->head++;
-				req->buf_list = NULL;
-			} else {
-				req->buf_index = req->buf_list->bgid;
-				req->flags &= ~REQ_F_BUFFER_RING;
-			}
-		}
+	if (req->flags & REQ_F_PARTIAL_IO)
 		return;
-	}
 
 	io_ring_submit_lock(ctx, issue_flags);
 
@@ -77,16 +71,35 @@ void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 	req->buf_index = buf->bgid;
 
 	io_ring_submit_unlock(ctx, issue_flags);
+	return;
 }
 
-static int io_buffer_add_list(struct io_ring_ctx *ctx,
-			      struct io_buffer_list *bl, unsigned int bgid)
+void io_kbuf_recycle_ring(struct io_kiocb *req)
 {
-	bl->bgid = bgid;
-	if (bgid < BGID_ARRAY)
-		return 0;
-
-	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
+	/*
+	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
+	 * the flag and hence ensure that bl->head doesn't get incremented.
+	 * If the tail has already been incremented, hang on to it.
+	 * The exception is partial io, that case we should increment bl->head
+	 * to monopolize the buffer.
+	 */
+	if (req->buf_list) {
+		if (req->flags & REQ_F_PARTIAL_IO) {
+			/*
+			 * If we end up here, then the io_uring_lock has
+			 * been kept held since we retrieved the buffer.
+			 * For the io-wq case, we already cleared
+			 * req->buf_list when the buffer was retrieved,
+			 * hence it cannot be set here for that case.
+			 */
+			req->buf_list->head++;
+			req->buf_list = NULL;
+		} else {
+			req->buf_index = req->buf_list->bgid;
+			req->flags &= ~REQ_F_BUFFER_RING;
+		}
+	}
+	return;
 }
 
 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
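For context, recycling is what a request does when it selected a provided buffer but has to back out and retry later (for example a receive that would block). Below is a purely illustrative call-site sketch, not taken from this patch: the handler name and do_the_receive() step are hypothetical, only the io_kbuf_recycle(req, issue_flags) call pattern is the point.

/* Illustrative only: a handler backing out after selecting a buffer. */
static int io_some_recv_handler(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = do_the_receive(req, issue_flags);	/* hypothetical transfer step */
	if (ret == -EAGAIN) {
		/*
		 * The attempt is being retried; hand the buffer back so a
		 * later attempt can pick one up again. The REQ_F_PARTIAL_IO
		 * checks inside the recycle helpers decide whether the
		 * buffer may actually be reused or must be kept.
		 */
		io_kbuf_recycle(req, issue_flags);
		return -EAGAIN;
	}
	return ret;
}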