author		Dave Jiang <dave.jiang@intel.com>	2021-12-13 19:51:29 +0100
committer	Vinod Koul <vkoul@kernel.org>	2022-01-05 08:41:22 +0100
commit		23a50c8035655c5a1d9b52c878b3ebf7b6b83eea (patch)
tree		1c7afe044a650d1d3045390a414f6f7773cd79b1 /drivers/dma
parent		dmaengine: idxd: embed irq_entry in idxd_wq struct (diff)
dmaengine: idxd: fix descriptor flushing locking
The descriptor flushing for shutdown is not holding the irq_entry list
lock. If there's ongoing interrupt completion handling, this can corrupt
the list. Add locking to protect list walking. Also refactor the code so
it's more compact.

Fixes: 8f47d1a5e545 ("dmaengine: idxd: connect idxd to dmaengine subsystem")
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/163942148935.2412839.18282664745572777280.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
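For context, a minimal user-space sketch of the locking pattern the patch moves to: detach everything from the shared list while holding the lock, then run the completions on a private list after the lock is dropped. This is not kernel code; the struct names, the pthread mutex, and the flush_pending()/complete_desc() helpers are hypothetical stand-ins for ie->list_lock, the pending/work lists, and idxd_dma_complete_txd().

/*
 * Standalone sketch of "drain under lock, complete outside the lock".
 * Names here are illustrative only; they do not exist in the idxd driver.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct desc {
	int id;
	int completed_ok;      /* stands in for desc->completion->status */
	struct desc *next;
};

struct irq_entry {
	pthread_mutex_t lock;  /* plays the role of ie->list_lock */
	struct desc *pending;  /* plays the role of the pending/work lists */
};

static void complete_desc(struct desc *d)
{
	/* the driver would call idxd_dma_complete_txd() here */
	printf("desc %d: %s\n", d->id,
	       d->completed_ok ? "IDXD_COMPLETE_NORMAL" : "IDXD_COMPLETE_ABORT");
	free(d);
}

static void flush_pending(struct irq_entry *ie)
{
	struct desc *flist, *d;

	/* Detach the whole list while holding the lock ... */
	pthread_mutex_lock(&ie->lock);
	flist = ie->pending;
	ie->pending = NULL;
	pthread_mutex_unlock(&ie->lock);

	/* ... then complete the private list with the lock dropped. */
	while ((d = flist)) {
		flist = d->next;
		complete_desc(d);
	}
}

int main(void)
{
	struct irq_entry ie = { .lock = PTHREAD_MUTEX_INITIALIZER, .pending = NULL };

	for (int i = 0; i < 3; i++) {
		struct desc *d = malloc(sizeof(*d));
		d->id = i;
		d->completed_ok = i % 2;
		d->next = ie.pending;
		ie.pending = d;
	}
	flush_pending(&ie);
	return 0;
}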
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/idxd/init.c	29
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 29c732a94027..03c735727f68 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -689,26 +689,28 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	return rc;
 }
 
-static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
+static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
 {
 	struct idxd_desc *desc, *itr;
 	struct llist_node *head;
+	LIST_HEAD(flist);
+	enum idxd_complete_type ctype;
 
+	spin_lock(&ie->list_lock);
 	head = llist_del_all(&ie->pending_llist);
-	if (!head)
-		return;
-
-	llist_for_each_entry_safe(desc, itr, head, llnode)
-		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
-}
+	if (head) {
+		llist_for_each_entry_safe(desc, itr, head, llnode)
+			list_add_tail(&desc->list, &ie->work_list);
+	}
 
-static void idxd_flush_work_list(struct idxd_irq_entry *ie)
-{
-	struct idxd_desc *desc, *iter;
+	list_for_each_entry_safe(desc, itr, &ie->work_list, list)
+		list_move_tail(&desc->list, &flist);
+	spin_unlock(&ie->list_lock);
 
-	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
+	list_for_each_entry_safe(desc, itr, &flist, list) {
 		list_del(&desc->list);
-		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
+		ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
+		idxd_dma_complete_txd(desc, ctype, true);
 	}
 }
 
@@ -762,8 +764,7 @@ static void idxd_shutdown(struct pci_dev *pdev)
 		synchronize_irq(irq_entry->vector);
 		if (i == 0)
 			continue;
-		idxd_flush_pending_llist(irq_entry);
-		idxd_flush_work_list(irq_entry);
+		idxd_flush_pending_descs(irq_entry);
 	}
 	flush_workqueue(idxd->wq);
 }