author     Sinan Kaya <okaya@codeaurora.org>  2016-10-21 18:37:57 +0200
committer  Vinod Koul <vinod.koul@intel.com>  2016-11-03 14:25:45 +0100
commit     9483d9ae09452ad4cdf7f0bb0c0fae2775278c85 (patch)
tree       0e604f332f066cf3291de95bfd4ae3662bea5f96 /drivers/dma/qcom
parent     dmaengine: qcom_hidma: make pending_tre_count atomic (diff)
dmaengine: qcom_hidma: bring out interrupt cause
Bring out the interrupt cause to the top level so that MSI interrupts
can be hooked at a later stage.

Signed-off-by: Sinan Kaya <okaya@codeaurora.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/qcom')
-rw-r--r--  drivers/dma/qcom/hidma_ll.c  62
1 file changed, 33 insertions, 29 deletions
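
The refactor pulls the per-cause work out of the interrupt loop and into hidma_ll_int_handler_internal(), so a later patch can call it directly with the cause associated with an MSI vector instead of deriving it from HIDMA_EVCA_IRQ_STAT_REG on a shared line. A minimal sketch of such a hook, kept in hidma_ll.c next to the internal handler; the entry point name hidma_ll_inthandler_msi and the per-vector cause argument are assumptions about the follow-up, not part of this commit:

irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * Each MSI vector is assumed to carry a known cause, so there is
	 * no need to read and re-read the IRQ status register the way the
	 * wired handler below does.
	 */
	hidma_ll_int_handler_internal(lldev, cause);

	return IRQ_HANDLED;
}
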
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 114409e7eec1..9193f466c9e7 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -418,12 +418,24 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
* requests traditionally to the destination, this concept does not apply
* here for this HW.
*/
-irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{
- struct hidma_lldev *lldev = arg;
- u32 status;
- u32 enable;
- u32 cause;
+ if (cause & HIDMA_ERR_INT_MASK) {
+ dev_err(lldev->dev, "error 0x%x, disabling...\n",
+ cause);
+
+ /* Clear out pending interrupts */
+ writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+ /* No further submissions. */
+ hidma_ll_disable(lldev);
+
+ /* Driver completes the txn and intimates the client.*/
+ hidma_cleanup_pending_tre(lldev, 0xFF,
+ HIDMA_EVRE_STATUS_ERROR);
+
+ return;
+ }
/*
* Fine tuned for this HW...
@@ -432,35 +444,28 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
* read and write accessors are used for performance reasons due to
* interrupt delivery guarantees. Do not copy this code blindly and
* expect that to work.
+ *
+ * Try to consume as many EVREs as possible.
*/
+ hidma_handle_tre_completion(lldev);
+
+ /* We consumed TREs or there are pending TREs or EVREs. */
+ writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+}
+
+irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+{
+ struct hidma_lldev *lldev = arg;
+ u32 status;
+ u32 enable;
+ u32 cause;
+
status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
cause = status & enable;
while (cause) {
- if (cause & HIDMA_ERR_INT_MASK) {
- dev_err(lldev->dev, "error 0x%x, disabling...\n",
- cause);
-
- /* Clear out pending interrupts */
- writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
-
- /* No further submissions. */
- hidma_ll_disable(lldev);
-
- /* Driver completes the txn and intimates the client.*/
- hidma_cleanup_pending_tre(lldev, 0xFF,
- HIDMA_EVRE_STATUS_ERROR);
- goto out;
- }
-
- /*
- * Try to consume as many EVREs as possible.
- */
- hidma_handle_tre_completion(lldev);
-
- /* We consumed TREs or there are pending TREs or EVREs. */
- writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ hidma_ll_int_handler_internal(lldev, cause);
/*
* Another interrupt might have arrived while we are
@@ -471,7 +476,6 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
cause = status & enable;
}
-out:
return IRQ_HANDLED;
}
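
For reference, hidma_ll_inthandler() remains the entry point for the wired (non-MSI) channel interrupt. A rough sketch of how the upper layer hands the line to it, assuming the probe path already has the platform device and a struct hidma_dev whose lldev field points at the low-level device; the helper names hidma_chirq_handler and hidma_request_chirq and the zero request flags are illustrative:

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev **lldevp = arg;

	/* arg is &dmadev->lldev in this sketch, hence the extra dereference. */
	return hidma_ll_inthandler(chirq, *lldevp);
}

static int hidma_request_chirq(struct platform_device *pdev,
			       struct hidma_dev *dmadev, int chirq)
{
	/* Route the wired channel interrupt into the wrapper above. */
	return devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
				"qcom-hidma", &dmadev->lldev);
}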