author		Tudor Ambarus <tudor.ambarus@microchip.com>	2020-01-23 15:03:11 +0100
committer	Vinod Koul <vkoul@kernel.org>			2020-02-25 06:57:27 +0100
commit		078a6506141a4ce76bee6c257e9b14f5c606ee4c (patch)
tree		03227350ce44872ded5f4aa3c138c933e11c99ae /drivers/dma/at_hdmac.c
parent		dmaengine: at_hdmac: Switch atomic allocations to GFP_NOWAIT (diff)
dmaengine: at_hdmac: Fix deadlocks
Fix the following deadlocks:
1/ atc_handle_cyclic() and atc_chain_complete() called
dmaengine_desc_get_callback_invoke() while wrongly holding
atchan->lock. A client can set the descriptor callback to
dmaengine_terminate_sync(), which ends up trying to take the same
lock, resulting in a deadlock (see the sketch after this list).
2/ dma_run_dependencies() was called with atchan->lock held, but it
calls device_issue_pending(), which tries to take the same lock,
again resulting in a deadlock.
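For illustration only (not part of the original commit message), a
minimal sketch of deadlock 1/. broken_complete() and client_cb() are
hypothetical names standing in for atc_chain_complete() or
atc_handle_cyclic() and a client's descriptor callback:

static void broken_complete(struct at_dma_chan *atchan,
			    struct dma_async_tx_descriptor *txd)
{
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	/* ... bookkeeping on atchan->active_list ... */
	/* Callback runs with atchan->lock held: */
	dmaengine_desc_get_callback_invoke(txd, NULL);
	spin_unlock_irqrestore(&atchan->lock, flags);
}

static void client_cb(void *param)
{
	struct dma_chan *chan = param;

	/*
	 * dmaengine_terminate_sync() ends up in the driver's
	 * device_terminate_all() callback, atc_terminate_all(), which
	 * takes atchan->lock -- the very lock broken_complete() still
	 * holds. The CPU spins on a lock it already owns: deadlock.
	 */
	dmaengine_terminate_sync(chan);
}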
The driver must not hold the lock when invoking the callback or when
running dependencies. Releasing the spinlock from within a called
function just before invoking the callback is not a clean solution
either: a function that drops a lock taken by its caller is no longer
atomic when called from an atomic region. The lock is therefore taken
inside the child routines themselves, exactly where it is needed, as
sketched below.
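A condensed sketch of the resulting pattern, simplified from the
patched atc_chain_complete() shown in the diff below (cookie handling,
error paths and unrelated fields elided):

static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	unsigned long flags;

	/* Take the lock locally, only around the shared lists. */
	spin_lock_irqsave(&atchan->lock, flags);
	list_move(&desc->desc_node, &atchan->free_list);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/*
	 * Lock dropped: the client callback may safely re-enter the
	 * driver (e.g. dmaengine_terminate_sync()), and
	 * dma_run_dependencies() may call device_issue_pending().
	 */
	if (!atc_chan_is_cyclic(atchan))
		dmaengine_desc_get_callback_invoke(txd, NULL);
	dma_run_dependencies(txd);
}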
Signed-off-by: Tudor Ambarus <tudor.ambarus@microchip.com>
Acked-by: Ludovic Desroches <ludovic.desroches@microchip.com>
Link: https://lore.kernel.org/r/20200123140237.125799-6-tudor.ambarus@microchip.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Diffstat (limited to 'drivers/dma/at_hdmac.c')
-rw-r--r--	drivers/dma/at_hdmac.c	74
1 file changed, 39 insertions(+), 35 deletions(-)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 8e8e04bd1b28..73a20780744b 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -426,17 +426,19 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
  * atc_chain_complete - finish work for one transaction chain
  * @atchan: channel we work on
  * @desc: descriptor at the head of the chain we want do complete
- *
- * Called with atchan->lock held and bh disabled */
+ */
 static void
 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 {
 	struct dma_async_tx_descriptor *txd = &desc->txd;
 	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"descriptor %u complete\n", txd->cookie);
 
+	spin_lock_irqsave(&atchan->lock, flags);
+
 	/* mark the descriptor as complete for non cyclic cases only */
 	if (!atc_chan_is_cyclic(atchan))
 		dma_cookie_complete(txd);
@@ -453,16 +455,13 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 	/* move myself to free_list */
 	list_move(&desc->desc_node, &atchan->free_list);
 
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
 	dma_descriptor_unmap(txd);
 	/* for cyclic transfers,
 	 * no need to replay callback function while stopping */
-	if (!atc_chan_is_cyclic(atchan)) {
-		/*
-		 * The API requires that no submissions are done from a
-		 * callback, so we don't need to drop the lock here
-		 */
+	if (!atc_chan_is_cyclic(atchan))
 		dmaengine_desc_get_callback_invoke(txd, NULL);
-	}
 
 	dma_run_dependencies(txd);
 }
@@ -480,9 +479,12 @@ static void atc_complete_all(struct at_dma_chan *atchan)
 {
 	struct at_desc *desc, *_desc;
 	LIST_HEAD(list);
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
 
+	spin_lock_irqsave(&atchan->lock, flags);
+
 	/*
 	 * Submit queued descriptors ASAP, i.e. before we go through
 	 * the completed ones.
@@ -494,6 +496,8 @@ static void atc_complete_all(struct at_dma_chan *atchan)
 	/* empty queue list by moving descriptors (if any) to active_list */
 	list_splice_init(&atchan->queue, &atchan->active_list);
 
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		atc_chain_complete(atchan, desc);
 }
@@ -501,38 +505,44 @@ static void atc_complete_all(struct at_dma_chan *atchan)
 
 /**
  * atc_advance_work - at the end of a transaction, move forward
  * @atchan: channel where the transaction ended
- *
- * Called with atchan->lock held and bh disabled
  */
 static void atc_advance_work(struct at_dma_chan *atchan)
 {
+	unsigned long flags;
+	int ret;
+
 	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
 
-	if (atc_chan_is_enabled(atchan))
+	spin_lock_irqsave(&atchan->lock, flags);
+	ret = atc_chan_is_enabled(atchan);
+	spin_unlock_irqrestore(&atchan->lock, flags);
+	if (ret)
 		return;
 
 	if (list_empty(&atchan->active_list) ||
-	    list_is_singular(&atchan->active_list)) {
-		atc_complete_all(atchan);
-	} else {
-		atc_chain_complete(atchan, atc_first_active(atchan));
-		/* advance work */
-		atc_dostart(atchan, atc_first_active(atchan));
-	}
+	    list_is_singular(&atchan->active_list))
+		return atc_complete_all(atchan);
+
+	atc_chain_complete(atchan, atc_first_active(atchan));
+
+	/* advance work */
+	spin_lock_irqsave(&atchan->lock, flags);
+	atc_dostart(atchan, atc_first_active(atchan));
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 /**
  * atc_handle_error - handle errors reported by DMA controller
  * @atchan: channel where error occurs
- *
- * Called with atchan->lock held and bh disabled
  */
 static void atc_handle_error(struct at_dma_chan *atchan)
 {
 	struct at_desc *bad_desc;
 	struct at_desc *child;
+	unsigned long flags;
 
+	spin_lock_irqsave(&atchan->lock, flags);
 	/*
 	 * The descriptor currently at the head of the active list is
 	 * broked. Since we don't have any way to report errors, we'll
@@ -564,6 +574,8 @@ static void atc_handle_error(struct at_dma_chan *atchan)
 	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
 		atc_dump_lli(atchan, &child->lli);
 
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
 	/* Pretend the descriptor completed successfully */
 	atc_chain_complete(atchan, bad_desc);
 }
@@ -571,8 +583,6 @@ static void atc_handle_error(struct at_dma_chan *atchan)
 /**
  * atc_handle_cyclic - at the end of a period, run callback function
  * @atchan: channel used for cyclic operations
- *
- * Called with atchan->lock held and bh disabled
  */
 static void atc_handle_cyclic(struct at_dma_chan *atchan)
 {
@@ -591,17 +601,14 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
 static void atc_tasklet(unsigned long data)
 {
 	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
-	unsigned long flags;
 
-	spin_lock_irqsave(&atchan->lock, flags);
 	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
-		atc_handle_error(atchan);
-	else if (atc_chan_is_cyclic(atchan))
-		atc_handle_cyclic(atchan);
-	else
-		atc_advance_work(atchan);
+		return atc_handle_error(atchan);
 
-	spin_unlock_irqrestore(&atchan->lock, flags);
+	if (atc_chan_is_cyclic(atchan))
+		return atc_handle_cyclic(atchan);
+
+	atc_advance_work(atchan);
 }
 
 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -1437,6 +1444,8 @@ static int atc_terminate_all(struct dma_chan *chan)
 	list_splice_init(&atchan->queue, &list);
 	list_splice_init(&atchan->active_list, &list);
 
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
 	/* Flush all pending and queued descriptors */
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		atc_chain_complete(atchan, desc);
@@ -1445,8 +1454,6 @@ static int atc_terminate_all(struct dma_chan *chan)
 	/* if channel dedicated to cyclic operations, free it */
 	clear_bit(ATC_IS_CYCLIC, &atchan->status);
 
-	spin_unlock_irqrestore(&atchan->lock, flags);
-
 	return 0;
 }
 
@@ -1507,7 +1514,6 @@ atc_tx_status(struct dma_chan *chan,
 static void atc_issue_pending(struct dma_chan *chan)
 {
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
-	unsigned long flags;
 
 	dev_vdbg(chan2dev(chan), "issue_pending\n");
 
@@ -1515,9 +1521,7 @@ static void atc_issue_pending(struct dma_chan *chan)
 	if (atc_chan_is_cyclic(atchan))
 		return;
 
-	spin_lock_irqsave(&atchan->lock, flags);
 	atc_advance_work(atchan);
-	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 /**