author    Jens Axboe <axboe@kernel.dk>    2019-11-05 21:51:51 +0100
committer Jens Axboe <axboe@kernel.dk>    2019-11-05 21:53:53 +0100
commit    6f72653e76a511db47addad6ab690390233fc250 (patch)
tree      216216d37d21915d4ea542cacebc9ed986cbd000 /fs/io-wq.c
parent    MAINTAINERS: update io_uring entry (diff)
io-wq: use proper nesting IRQ disabling spinlocks for cancel
We don't know what context we'll be called in for cancel; it could very
well be with IRQs disabled already. Use the IRQ-saving variants of the
locking primitives.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
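Background sketch (editor's addition, not part of the commit; function
names are hypothetical): spin_unlock_irq() re-enables interrupts
unconditionally, so a cancel path entered with IRQs already disabled
would return with them switched back on. The irqsave/irqrestore
variants save the caller's interrupt state into 'flags' and restore
exactly that state on unlock:

#include <linux/spinlock.h>

/* Hypothetical helpers, for illustration only. */
static void cancel_scan_unsafe(spinlock_t *lock)
{
	spin_lock_irq(lock);		/* disables IRQs unconditionally */
	/* ... scan and cancel work ... */
	spin_unlock_irq(lock);		/* re-enables IRQs, even if the
					 * caller entered with them off */
}

static void cancel_scan_safe(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);	/* saves caller's IRQ state */
	/* ... scan and cancel work ... */
	spin_unlock_irqrestore(lock, flags); /* restores saved state */
}

The saved-flags form costs one extra local per call site, which is why
the unconditional _irq variants are generally preferred when the
calling context is known; here it is not, hence the change below.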
Diffstat (limited to 'fs/io-wq.c')
-rw-r--r--  fs/io-wq.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 3bbab2c58695..ba40a7ee31c3 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -642,19 +642,20 @@ static bool io_work_cancel(struct io_worker *worker, void *cancel_data)
 {
 	struct io_cb_cancel_data *data = cancel_data;
 	struct io_wqe *wqe = data->wqe;
+	unsigned long flags;
 	bool ret = false;
 
 	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
-	spin_lock_irq(&wqe->lock);
+	spin_lock_irqsave(&wqe->lock, flags);
 	if (worker->cur_work &&
 	    data->cancel(worker->cur_work, data->caller_data)) {
 		send_sig(SIGINT, worker->task, 1);
 		ret = true;
 	}
-	spin_unlock_irq(&wqe->lock);
+	spin_unlock_irqrestore(&wqe->lock, flags);
 
 	return ret;
 }
@@ -669,9 +670,10 @@ static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
 		.caller_data = cancel_data,
 	};
 	struct io_wq_work *work;
+	unsigned long flags;
 	bool found = false;
 
-	spin_lock_irq(&wqe->lock);
+	spin_lock_irqsave(&wqe->lock, flags);
 	list_for_each_entry(work, &wqe->work_list, list) {
 		if (cancel(work, cancel_data)) {
 			list_del(&work->list);
@@ -679,7 +681,7 @@ static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
 			break;
 		}
 	}
-	spin_unlock_irq(&wqe->lock);
+	spin_unlock_irqrestore(&wqe->lock, flags);
 
 	if (found) {
 		work->flags |= IO_WQ_WORK_CANCEL;
@@ -733,6 +735,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
					    struct io_wq_work *cwork)
 {
 	struct io_wq_work *work;
+	unsigned long flags;
 	bool found = false;
 
 	cwork->flags |= IO_WQ_WORK_CANCEL;
@@ -742,7 +745,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
-	spin_lock_irq(&wqe->lock);
+	spin_lock_irqsave(&wqe->lock, flags);
 	list_for_each_entry(work, &wqe->work_list, list) {
 		if (work == cwork) {
 			list_del(&work->list);
@@ -750,7 +753,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
 			break;
 		}
 	}
-	spin_unlock_irq(&wqe->lock);
+	spin_unlock_irqrestore(&wqe->lock, flags);
 
 	if (found) {
 		work->flags |= IO_WQ_WORK_CANCEL;
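Usage note (again an editor's sketch with hypothetical names, not code
from the tree): the saved-flags pattern nests correctly, which is the
situation the commit message describes, namely a cancel helper reached
from a context that already has interrupts disabled:

#include <linux/spinlock.h>

/* Hypothetical caller: outer code already holds a lock with IRQs
 * disabled when it invokes cancel-style code taking an inner lock. */
static void cancel_from_irq_off_context(spinlock_t *outer,
					spinlock_t *inner)
{
	unsigned long outer_flags, inner_flags;

	spin_lock_irqsave(outer, outer_flags);		/* IRQs now off */

	/* Like the fixed cancel paths above: the unlock restores,
	 * rather than re-enables, the interrupt state. */
	spin_lock_irqsave(inner, inner_flags);
	/* ... cancel work under the inner lock ... */
	spin_unlock_irqrestore(inner, inner_flags);	/* IRQs still off */

	spin_unlock_irqrestore(outer, outer_flags);
}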