author | Jens Axboe <axboe@kernel.dk> | 2024-02-20 05:38:59 +0100
---|---|---
committer | Jens Axboe <axboe@kernel.dk> | 2024-02-27 19:52:45 +0100
commit | c3f9109dbc9e2cd0b2c3ba0536431eef282783e9 (patch) |
tree | e7af3ead4af50beb71f1c57d9d0241c66c2c7a74 /io_uring |
parent | io_uring/net: improve the usercopy for sendmsg/recvmsg (diff) |
download | linux-c3f9109dbc9e2cd0b2c3ba0536431eef282783e9.tar.xz linux-c3f9109dbc9e2cd0b2c3ba0536431eef282783e9.zip |
io_uring/kbuf: flag request if buffer pool is empty after buffer pick
Normally we do an extra roundtrip for retries even if the buffer pool is
depleted, as we don't check that upfront. Rather than add this check, have
the buffer selection methods mark the request with REQ_F_BL_EMPTY if the
used buffer group is out of buffers after this selection. This is very
cheap to do once we're all the way inside there anyway, and it gives the
caller a chance to make better decisions on how to proceed.
For example, recv/recvmsg multishot could check this flag when it
decides whether to keep receiving or not.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
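
To make the caller-side idea from the commit message concrete, here is a minimal userspace sketch. It is not code from this patch or from io_uring/net.c: the `mock_*` names and bit values are invented for illustration, and only the flag names mirror the kernel's REQ_F_BUFFER_SELECTED and REQ_F_BL_EMPTY. It models a multishot receive path deciding whether to stay armed based on whether the buffer group just ran dry.

```c
/*
 * Userspace mock, not kernel code: the struct layout and bit values are
 * invented; only the flag names mirror the kernel's request flags.
 */
#include <stdbool.h>
#include <stdio.h>

#define MOCK_REQ_F_BUFFER_SELECTED	(1U << 0)	/* a buffer was picked */
#define MOCK_REQ_F_BL_EMPTY		(1U << 1)	/* pool empty after the pick */

struct mock_req {
	unsigned int flags;
};

/* Keep a multishot receive armed only while the buffer group still has buffers. */
static bool mock_keep_multishot_armed(const struct mock_req *req)
{
	if (!(req->flags & MOCK_REQ_F_BUFFER_SELECTED))
		return false;	/* nothing was picked, so there is nothing to retry with */
	return !(req->flags & MOCK_REQ_F_BL_EMPTY);
}

int main(void)
{
	struct mock_req req = {
		.flags = MOCK_REQ_F_BUFFER_SELECTED | MOCK_REQ_F_BL_EMPTY,
	};

	printf("keep multishot armed: %s\n",
	       mock_keep_multishot_armed(&req) ? "yes" : "no");
	return 0;
}
```

The point of the flag is exactly this kind of early decision: instead of re-arming, issuing another receive, and only then discovering that buffer selection fails, the caller already knows at completion time that the pool is empty.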
Diffstat (limited to 'io_uring')
-rw-r--r-- | io_uring/kbuf.c | 10 |
1 file changed, 8 insertions, 2 deletions
```diff
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index ee866d646997..3d257ed9031b 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -139,6 +139,8 @@ static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
 		list_del(&kbuf->list);
 		if (*len == 0 || *len > kbuf->len)
 			*len = kbuf->len;
+		if (list_empty(&bl->buf_list))
+			req->flags |= REQ_F_BL_EMPTY;
 		req->flags |= REQ_F_BUFFER_SELECTED;
 		req->kbuf = kbuf;
 		req->buf_index = kbuf->bid;
@@ -152,12 +154,16 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 					  unsigned int issue_flags)
 {
 	struct io_uring_buf_ring *br = bl->buf_ring;
+	__u16 tail, head = bl->head;
 	struct io_uring_buf *buf;
-	__u16 head = bl->head;
 
-	if (unlikely(smp_load_acquire(&br->tail) == head))
+	tail = smp_load_acquire(&br->tail);
+	if (unlikely(tail == head))
 		return NULL;
 
+	if (head + 1 == tail)
+		req->flags |= REQ_F_BL_EMPTY;
+
 	head &= bl->mask;
 	/* mmaped buffers are always contig */
 	if (bl->is_mmap || head < IO_BUFFER_LIST_BUF_PER_PAGE) {
```
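
For readers unfamiliar with the provided-buffer ring, the following standalone sketch (an invented `mock_buf_ring` type, no kernel dependencies) shows why the `head + 1 == tail` test in io_ring_buffer_select() means "this pick takes the last buffer": head and tail are free-running 16-bit counters, so their difference is the number of buffers left before the pick.

```c
/*
 * Standalone illustration, not kernel code: the type and helper are mocked.
 * tail is advanced by the application as it provides buffers, head by the
 * consumer as it picks them; tail == head means empty before the pick, and
 * head + 1 == tail means the buffer being picked is the last one (the
 * REQ_F_BL_EMPTY case in the patch above). The kernel reads tail with
 * smp_load_acquire(); plain loads suffice for this single-threaded demo.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mock_buf_ring {
	uint16_t head;	/* consumer side */
	uint16_t tail;	/* producer side */
};

/* Returns true if a buffer was picked; *last tells whether it was the final one. */
static bool mock_pick_buffer(struct mock_buf_ring *br, bool *last)
{
	uint16_t tail = br->tail;

	if (tail == br->head)
		return false;				/* no buffers provided */
	*last = (uint16_t)(br->head + 1) == tail;	/* pool empty after this pick */
	br->head++;
	return true;
}

int main(void)
{
	struct mock_buf_ring br = { .head = 0, .tail = 2 };
	bool last;

	while (mock_pick_buffer(&br, &last))
		printf("picked a buffer, pool empty afterwards: %s\n",
		       last ? "yes" : "no");
	return 0;
}
```

With two provided buffers, the first pick reports the pool as still populated and the second reports it empty, mirroring when the patch would set REQ_F_BL_EMPTY.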