Diffstat (limited to 'io_uring/kbuf.h')
-rw-r--r--  io_uring/kbuf.h | 94
 1 file changed, 67 insertions(+), 27 deletions(-)
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index b90aca3a57fa..36aadfe5ac00 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -4,6 +4,16 @@
#include <uapi/linux/io_uring.h>
+enum {
+ /* ring mapped provided buffers */
+ IOBL_BUF_RING = 1,
+ /* ring mapped provided buffers, but mmap'ed by application */
+ IOBL_MMAP = 2,
+ /* buffers are consumed incrementally rather than always fully */
+ IOBL_INC = 4,
+
+};
+
struct io_buffer_list {
/*
* If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
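The hunk above folds the old is_buf_ring/is_mmap booleans into a single bitmask (see the next hunk) and introduces IOBL_INC, which marks a ring whose buffers may be handed back to the application in pieces rather than always whole. A minimal userspace sketch of opting in, assuming the IOU_PBUF_RING_INC registration flag from the uapi side of this series (struct io_uring_buf_reg and liburing's io_uring_register_buf_ring() are existing API; ring_mem is a placeholder):

	struct io_uring_buf_reg reg = {
		.ring_addr	= (unsigned long) ring_mem,	/* page-aligned ring memory */
		.ring_entries	= 8,				/* must be a power of two */
		.bgid		= 0,				/* buffer group ID */
		.flags		= IOU_PBUF_RING_INC,		/* request incremental consumption */
	};

	/* liburing wrapper around IORING_REGISTER_PBUF_RING; kernels
	 * without this feature reject the unknown flag. */
	if (io_uring_register_buf_ring(&ring, &reg, 0) < 0)
		/* fall back to full-buffer consumption */;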
@@ -25,12 +35,9 @@ struct io_buffer_list {
__u16 head;
__u16 mask;
- atomic_t refs;
+ __u16 flags;
- /* ring mapped provided buffers */
- __u8 is_buf_ring;
- /* ring mapped provided buffers, but mmap'ed by application */
- __u8 is_mmap;
+ atomic_t refs;
};
struct io_buffer {
@@ -52,8 +59,8 @@ struct buf_sel_arg {
struct iovec *iovs;
size_t out_len;
size_t max_len;
- int nr_iovs;
- int mode;
+ unsigned short nr_iovs;
+ unsigned short mode;
};
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
@@ -73,7 +80,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);
-void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
+void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags);
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
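With the length now threaded through, __io_put_kbuf() (the out-of-line slow path for legacy, non-ring provided buffers) keeps the same signature as the ring path, even though legacy buffers are always released whole. A hedged sketch of a caller, assuming a recv-style completion where ret holds the bytes transferred:

	/* Pass the transferred length so a ring buffer can be committed
	 * partially; for a legacy buffer the length has no effect. */
	unsigned int cflags = io_put_kbuf(req, ret < 0 ? 0 : ret, issue_flags);
	io_req_set_res(req, ret, cflags);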
@@ -117,25 +124,55 @@ static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
return false;
}
-static inline void __io_put_kbuf_ring(struct io_kiocb *req, int nr)
+/* Mapped buffer ring, return io_uring_buf from head */
+#define io_ring_head_to_buf(br, head, mask) &(br)->bufs[(head) & (mask)]
+
+static inline bool io_kbuf_commit(struct io_kiocb *req,
+ struct io_buffer_list *bl, int len, int nr)
+{
+ if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
+ return true;
+
+ req->flags &= ~REQ_F_BUFFERS_COMMIT;
+
+ if (unlikely(len < 0))
+ return true;
+
+ if (bl->flags & IOBL_INC) {
+ struct io_uring_buf *buf;
+
+ buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
+ if (WARN_ON_ONCE(len > buf->len))
+ len = buf->len;
+ buf->len -= len;
+ if (buf->len) {
+ buf->addr += len;
+ return false;
+ }
+ }
+
+ bl->head += nr;
+ return true;
+}
+
+static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
{
struct io_buffer_list *bl = req->buf_list;
+ bool ret = true;
if (bl) {
- if (req->flags & REQ_F_BUFFERS_COMMIT) {
- bl->head += nr;
- req->flags &= ~REQ_F_BUFFERS_COMMIT;
- }
+ ret = io_kbuf_commit(req, bl, len, nr);
req->buf_index = bl->bgid;
}
req->flags &= ~REQ_F_BUFFER_RING;
+ return ret;
}
-static inline void __io_put_kbuf_list(struct io_kiocb *req,
+static inline void __io_put_kbuf_list(struct io_kiocb *req, int len,
struct list_head *list)
{
if (req->flags & REQ_F_BUFFER_RING) {
- __io_put_kbuf_ring(req, 1);
+ __io_put_kbuf_ring(req, len, 1);
} else {
req->buf_index = req->kbuf->bgid;
list_add(&req->kbuf->list, list);
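io_kbuf_commit() is the core of the new behaviour: on an IOBL_INC ring, a short transfer shrinks the buffer at the ring head in place (addr advances, len shrinks) and leaves the head where it is, returning false so the completion gains IORING_CQE_F_BUF_MORE; only a fully drained buffer bumps the head. A standalone userspace simulation of that accounting (field names simplified from struct io_uring_buf; illustrative only):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct buf { uint64_t addr; uint32_t len; };

	/* Mirrors the IOBL_INC branch of io_kbuf_commit(): returns true
	 * when the buffer is spent and the ring head may advance, false
	 * when the adjusted buffer stays at the head, which maps to
	 * IORING_CQE_F_BUF_MORE in the CQE. */
	static bool commit_inc(struct buf *b, uint32_t used)
	{
		if (used > b->len)	/* the kernel WARNs and clamps here */
			used = b->len;
		b->len -= used;
		if (b->len) {
			b->addr += used;
			return false;
		}
		return true;
	}

	int main(void)
	{
		struct buf b = { .addr = 0x10000, .len = 4096 };

		/* 1 KiB receive: prints full=0 addr=0x10400 len=3072 */
		printf("full=%d addr=%#llx len=%u\n", commit_inc(&b, 1024),
		       (unsigned long long)b.addr, b.len);
		/* remaining 3 KiB: prints full=1 addr=0x10400 len=0 */
		printf("full=%d addr=%#llx len=%u\n", commit_inc(&b, 3072),
		       (unsigned long long)b.addr, b.len);
		return 0;
	}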
@@ -150,11 +187,12 @@ static inline void io_kbuf_drop(struct io_kiocb *req)
if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
return;
- __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
+ /* len == 0 is fine here, non-ring will always drop all of it */
+ __io_put_kbuf_list(req, 0, &req->ctx->io_buffers_comp);
}
-static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int nbufs,
- unsigned issue_flags)
+static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int len,
+ int nbufs, unsigned issue_flags)
{
unsigned int ret;
@@ -162,22 +200,24 @@ static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int nbufs,
return 0;
ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
- if (req->flags & REQ_F_BUFFER_RING)
- __io_put_kbuf_ring(req, nbufs);
- else
- __io_put_kbuf(req, issue_flags);
+ if (req->flags & REQ_F_BUFFER_RING) {
+ if (!__io_put_kbuf_ring(req, len, nbufs))
+ ret |= IORING_CQE_F_BUF_MORE;
+ } else {
+ __io_put_kbuf(req, len, issue_flags);
+ }
return ret;
}
-static inline unsigned int io_put_kbuf(struct io_kiocb *req,
+static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
unsigned issue_flags)
{
- return __io_put_kbufs(req, 1, issue_flags);
+ return __io_put_kbufs(req, len, 1, issue_flags);
}
-static inline unsigned int io_put_kbufs(struct io_kiocb *req, int nbufs,
- unsigned issue_flags)
+static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
+ int nbufs, unsigned issue_flags)
{
- return __io_put_kbufs(req, nbufs, issue_flags);
+ return __io_put_kbufs(req, len, nbufs, issue_flags);
}
#endif
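On the completion side, __io_put_kbufs() now ORs IORING_CQE_F_BUF_MORE into the CQE flags whenever io_kbuf_commit() left a partially consumed buffer at the ring head, telling the application that the same buffer ID will be reused. A hedged sketch of consumer-side handling (process_data() and reprovision() are hypothetical helpers):

	if (cqe->flags & IORING_CQE_F_BUFFER) {
		unsigned bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;

		process_data(bid, cqe->res);	/* hypothetical */
		if (!(cqe->flags & IORING_CQE_F_BUF_MORE)) {
			/* Fully consumed: the kernel advanced the ring
			 * head, so this buffer may be re-provided now. */
			reprovision(bid);	/* hypothetical */
		}
		/* With BUF_MORE set, later completions will reference
		 * the same bid until the buffer is used up. */
	}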