author		Jens Axboe <axboe@kernel.dk>	2019-12-18 02:50:29 +0100
committer	Jens Axboe <axboe@kernel.dk>	2019-12-18 03:57:27 +0100
commit		b29472ee7b53784f44011069fad15e539fd25bcf (patch)
tree		ddbf7808f953b62e1f66bcd1b59bdf1d6c91cf90 /fs
parent		io_uring: make IORING_OP_CANCEL_ASYNC deferrable (diff)
io_uring: make IORING_OP_TIMEOUT_REMOVE deferrable
If we defer this command as part of a link, we have to make sure that
the SQE data has been read upfront. Integrate the timeout remove op
into the prep handling to make it safe for SQE reuse.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
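As a point of reference, here is a minimal userspace sketch of how this opcode is typically driven. It is not part of the commit; it assumes liburing's io_uring_prep_timeout() and io_uring_prep_timeout_remove() helpers, and the tag 0x1234 is an arbitrary example value. A timeout is armed and then removed by its user_data; if the remove were instead submitted behind IOSQE_IO_LINK, it could be deferred, which is exactly the SQE-reuse case this patch makes safe.

#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* Arm a 5s timeout, tagged with user_data 0x1234 (arbitrary). */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_timeout(sqe, &ts, 0, 0);
	sqe->user_data = 0x1234;

	/*
	 * Remove it by that tag. timeout_flags must be zero, matching
	 * the -EINVAL checks in io_timeout_remove_prep() in the diff
	 * below.
	 */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_timeout_remove(sqe, 0x1234, 0);

	io_uring_submit(&ring);

	/*
	 * Two completions: the removed timeout posts -ECANCELED, and
	 * the remove request itself posts the io_timeout_cancel()
	 * result (0 on success).
	 */
	for (int i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		printf("user_data=%llu res=%d\n",
		       (unsigned long long) cqe->user_data, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}

Built against liburing (e.g. cc demo.c -luring), the remove should complete with res=0 and the timeout with res=-ECANCELED.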
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	44
1 file changed, 34 insertions(+), 10 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 1d6a5083f37f..9d4f8274ee1e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -326,6 +326,12 @@ struct io_cancel {
 	u64				addr;
 };
 
+struct io_timeout {
+	struct file			*file;
+	u64				addr;
+	int				flags;
+};
+
 struct io_async_connect {
 	struct sockaddr_storage		address;
 };
@@ -368,6 +374,7 @@ struct io_kiocb {
 		struct io_accept	accept;
 		struct io_sync		sync;
 		struct io_cancel	cancel;
+		struct io_timeout	timeout;
 	};
 
 	const struct io_uring_sqe	*sqe;
@@ -2818,26 +2825,40 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
 	return 0;
 }
 
+static int io_timeout_remove_prep(struct io_kiocb *req)
+{
+	const struct io_uring_sqe *sqe = req->sqe;
+
+	if (req->flags & REQ_F_PREPPED)
+		return 0;
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+		return -EINVAL;
+	if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
+		return -EINVAL;
+
+	req->timeout.addr = READ_ONCE(sqe->addr);
+	req->timeout.flags = READ_ONCE(sqe->timeout_flags);
+	if (req->timeout.flags)
+		return -EINVAL;
+
+	req->flags |= REQ_F_PREPPED;
+	return 0;
+}
+
 /*
  * Remove or update an existing timeout command
  */
 static int io_timeout_remove(struct io_kiocb *req)
 {
-	const struct io_uring_sqe *sqe = req->sqe;
 	struct io_ring_ctx *ctx = req->ctx;
-	unsigned flags;
 	int ret;
 
-	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
-		return -EINVAL;
-	flags = READ_ONCE(sqe->timeout_flags);
-	if (flags)
-		return -EINVAL;
+	ret = io_timeout_remove_prep(req);
+	if (ret)
+		return ret;
 
 	spin_lock_irq(&ctx->completion_lock);
-	ret = io_timeout_cancel(ctx, READ_ONCE(sqe->addr));
+	ret = io_timeout_cancel(ctx, req->timeout.addr);
 
 	io_cqring_fill_event(req, ret);
 	io_commit_cqring(ctx);
@@ -3108,6 +3129,9 @@ static int io_req_defer_prep(struct io_kiocb *req)
 	case IORING_OP_TIMEOUT:
 		ret = io_timeout_prep(req, io, false);
 		break;
+	case IORING_OP_TIMEOUT_REMOVE:
+		ret = io_timeout_remove_prep(req);
+		break;
 	case IORING_OP_ASYNC_CANCEL:
 		ret = io_async_cancel_prep(req);
 		break;