author     Linus Torvalds <torvalds@linux-foundation.org>  2024-01-21 00:03:25 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-01-21 00:03:25 +0100
commit     65163d16fcaef37733b5f273ffe4d00d731b34de (patch)
tree       edac27d7202e2b7c0a250aa836cf8d3f50fda8b2 /drivers/dma/dma-axi-dmac.c
parent     Merge tag 'coccinelle-for-6.8' of git://git.kernel.org/pub/scm/linux/kernel/g... (diff)
parent     dmaengine: dw-edma: increase size of 'name' in debugfs code (diff)
download   linux-65163d16fcaef37733b5f273ffe4d00d731b34de.tar.xz  linux-65163d16fcaef37733b5f273ffe4d00d731b34de.zip
Merge tag 'dmaengine-fix-6.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine
Pull dmaengine updates from Vinod Koul:
"New support:
- Loongson LS2X APB DMA controller
- sf-pdma: mpfs-pdma support
- Qualcomm X1E80100 GPI DMA controller support

Updates:
- Xilinx XDMA updates to support interleaved DMA transfers
- TI PSIL threads for AM62P and J722S, and cfg register regions description
- axi-dmac: improved cyclic DMA transfers
- Tegra: support for the dma-channel-mask property
- Remaining conversions of platform remove callbacks to return void

Driver fixes for:
- Xilinx xdma driver operator precedence and initialization fixes
- Excess kernel-doc warning fixes in the imx-sdma and xilinx xdma drivers
- format-overflow warning fixes for the rz-dmac and sh usb dmac drivers
- 'output may be truncated' fixes for the shdma, fsl-qdma and dw-edma drivers"
* tag 'dmaengine-fix-6.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (58 commits)
dmaengine: dw-edma: increase size of 'name' in debugfs code
dmaengine: fsl-qdma: increase size of 'irq_name'
dmaengine: shdma: increase size of 'dev_id'
dmaengine: xilinx: xdma: Fix kernel-doc warnings
dmaengine: usb-dmac: Avoid format-overflow warning
dmaengine: sh: rz-dmac: Avoid format-overflow warning
dmaengine: imx-sdma: fix Excess kernel-doc warnings
dmaengine: xilinx: xdma: Fix initialization location of desc in xdma_channel_isr()
dmaengine: xilinx: xdma: Fix operator precedence in xdma_prep_interleaved_dma()
dmaengine: xilinx: xdma: statify xdma_prep_interleaved_dma
dmaengine: xilinx: xdma: Workaround truncation compilation error
dmaengine: pl330: issue_pending waits until WFP state
dmaengine: xilinx: xdma: Implement interleaved DMA transfers
dmaengine: xilinx: xdma: Prepare the introduction of interleaved DMA transfers
dmaengine: xilinx: xdma: Add transfer error reporting
dmaengine: xilinx: xdma: Add error checking in xdma_channel_isr()
dmaengine: xilinx: xdma: Rework xdma_terminate_all()
dmaengine: xilinx: xdma: Ease dma_pool alignment requirements
dmaengine: xilinx: xdma: Add necessary macro definitions
dmaengine: xilinx: xdma: Get rid of unused code
...
Diffstat (limited to 'drivers/dma/dma-axi-dmac.c')
-rw-r--r--   drivers/dma/dma-axi-dmac.c   280
1 file changed, 191 insertions, 89 deletions
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 2457a420c13d..4e339c04fc1e 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -81,9 +81,13 @@
 #define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438
 #define AXI_DMAC_REG_PARTIAL_XFER_LEN 0x44c
 #define AXI_DMAC_REG_PARTIAL_XFER_ID 0x450
+#define AXI_DMAC_REG_CURRENT_SG_ID 0x454
+#define AXI_DMAC_REG_SG_ADDRESS 0x47c
+#define AXI_DMAC_REG_SG_ADDRESS_HIGH 0x4bc
 
 #define AXI_DMAC_CTRL_ENABLE BIT(0)
 #define AXI_DMAC_CTRL_PAUSE BIT(1)
+#define AXI_DMAC_CTRL_ENABLE_SG BIT(2)
 
 #define AXI_DMAC_IRQ_SOT BIT(0)
 #define AXI_DMAC_IRQ_EOT BIT(1)
@@ -97,20 +101,35 @@
 /* The maximum ID allocated by the hardware is 31 */
 #define AXI_DMAC_SG_UNUSED 32U
 
+/* Flags for axi_dmac_hw_desc.flags */
+#define AXI_DMAC_HW_FLAG_LAST BIT(0)
+#define AXI_DMAC_HW_FLAG_IRQ BIT(1)
+
+struct axi_dmac_hw_desc {
+        u32 flags;
+        u32 id;
+        u64 dest_addr;
+        u64 src_addr;
+        u64 next_sg_addr;
+        u32 y_len;
+        u32 x_len;
+        u32 src_stride;
+        u32 dst_stride;
+        u64 __pad[2];
+};
+
 struct axi_dmac_sg {
-        dma_addr_t src_addr;
-        dma_addr_t dest_addr;
-        unsigned int x_len;
-        unsigned int y_len;
-        unsigned int dest_stride;
-        unsigned int src_stride;
-        unsigned int id;
         unsigned int partial_len;
         bool schedule_when_free;
+
+        struct axi_dmac_hw_desc *hw;
+        dma_addr_t hw_phys;
 };
 
 struct axi_dmac_desc {
         struct virt_dma_desc vdesc;
+        struct axi_dmac_chan *chan;
+
         bool cyclic;
         bool have_partial_xfer;
@@ -139,6 +158,7 @@ struct axi_dmac_chan {
         bool hw_partial_xfer;
         bool hw_cyclic;
         bool hw_2d;
+        bool hw_sg;
 };
 
 struct axi_dmac {
@@ -213,9 +233,11 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
         unsigned int flags = 0;
         unsigned int val;
 
-        val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
-        if (val) /* Queue is full, wait for the next SOT IRQ */
-                return;
+        if (!chan->hw_sg) {
+                val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
+                if (val) /* Queue is full, wait for the next SOT IRQ */
+                        return;
+        }
 
         desc = chan->next_desc;
@@ -229,14 +251,15 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
         sg = &desc->sg[desc->num_submitted];
 
         /* Already queued in cyclic mode. Wait for it to finish */
-        if (sg->id != AXI_DMAC_SG_UNUSED) {
+        if (sg->hw->id != AXI_DMAC_SG_UNUSED) {
                 sg->schedule_when_free = true;
                 return;
         }
 
-        desc->num_submitted++;
-        if (desc->num_submitted == desc->num_sgs ||
-            desc->have_partial_xfer) {
+        if (chan->hw_sg) {
+                chan->next_desc = NULL;
+        } else if (++desc->num_submitted == desc->num_sgs ||
+                   desc->have_partial_xfer) {
                 if (desc->cyclic)
                         desc->num_submitted = 0; /* Start again */
                 else
@@ -246,32 +269,42 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
                 chan->next_desc = desc;
         }
 
-        sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
+        sg->hw->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
 
-        if (axi_dmac_dest_is_mem(chan)) {
-                axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
-                axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
-        }
+        if (!chan->hw_sg) {
+                if (axi_dmac_dest_is_mem(chan)) {
+                        axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->hw->dest_addr);
+                        axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->hw->dst_stride);
+                }
 
-        if (axi_dmac_src_is_mem(chan)) {
-                axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
-                axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
+                if (axi_dmac_src_is_mem(chan)) {
+                        axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->hw->src_addr);
+                        axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->hw->src_stride);
+                }
         }
 
         /*
          * If the hardware supports cyclic transfers and there is no callback to
-         * call and only a single segment, enable hw cyclic mode to avoid
-         * unnecessary interrupts.
+         * call, enable hw cyclic mode to avoid unnecessary interrupts.
          */
-        if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
-            desc->num_sgs == 1)
-                flags |= AXI_DMAC_FLAG_CYCLIC;
+        if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback) {
+                if (chan->hw_sg)
+                        desc->sg[desc->num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_IRQ;
+                else if (desc->num_sgs == 1)
+                        flags |= AXI_DMAC_FLAG_CYCLIC;
+        }
 
         if (chan->hw_partial_xfer)
                 flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;
 
-        axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
-        axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
+        if (chan->hw_sg) {
+                axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, (u32)sg->hw_phys);
+                axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS_HIGH,
+                               (u64)sg->hw_phys >> 32);
+        } else {
+                axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->hw->x_len);
+                axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->hw->y_len);
+        }
 
         axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
         axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
 }
@@ -286,9 +319,9 @@ static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
         struct axi_dmac_sg *sg)
 {
         if (chan->hw_2d)
-                return sg->x_len * sg->y_len;
+                return (sg->hw->x_len + 1) * (sg->hw->y_len + 1);
         else
-                return sg->x_len;
+                return (sg->hw->x_len + 1);
 }
 
 static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
@@ -307,9 +340,9 @@ static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
         list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
                 for (i = 0; i < desc->num_sgs; i++) {
                         sg = &desc->sg[i];
-                        if (sg->id == AXI_DMAC_SG_UNUSED)
+                        if (sg->hw->id == AXI_DMAC_SG_UNUSED)
                                 continue;
-                        if (sg->id == id) {
+                        if (sg->hw->id == id) {
                                 desc->have_partial_xfer = true;
                                 sg->partial_len = len;
                                 found_sg = true;
@@ -348,6 +381,9 @@ static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
         rslt->result = DMA_TRANS_NOERROR;
         rslt->residue = 0;
 
+        if (chan->hw_sg)
+                return;
+
         /*
          * We get here if the last completed segment is partial, which
          * means we can compute the residue from that segment onwards
@@ -374,36 +410,47 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
             (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
                 axi_dmac_dequeue_partial_xfers(chan);
 
-        do {
-                sg = &active->sg[active->num_completed];
-                if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
-                        break;
-                if (!(BIT(sg->id) & completed_transfers))
-                        break;
-                active->num_completed++;
-                sg->id = AXI_DMAC_SG_UNUSED;
-                if (sg->schedule_when_free) {
-                        sg->schedule_when_free = false;
-                        start_next = true;
+        if (chan->hw_sg) {
+                if (active->cyclic) {
+                        vchan_cyclic_callback(&active->vdesc);
+                } else {
+                        list_del(&active->vdesc.node);
+                        vchan_cookie_complete(&active->vdesc);
+                        active = axi_dmac_active_desc(chan);
+                        start_next = !!active;
                 }
+        } else {
+                do {
+                        sg = &active->sg[active->num_completed];
+                        if (sg->hw->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
+                                break;
+                        if (!(BIT(sg->hw->id) & completed_transfers))
+                                break;
+                        active->num_completed++;
+                        sg->hw->id = AXI_DMAC_SG_UNUSED;
+                        if (sg->schedule_when_free) {
+                                sg->schedule_when_free = false;
+                                start_next = true;
+                        }
 
-                if (sg->partial_len)
-                        axi_dmac_compute_residue(chan, active);
+                        if (sg->partial_len)
+                                axi_dmac_compute_residue(chan, active);
 
-                if (active->cyclic)
-                        vchan_cyclic_callback(&active->vdesc);
+                        if (active->cyclic)
+                                vchan_cyclic_callback(&active->vdesc);
 
-                if (active->num_completed == active->num_sgs ||
-                    sg->partial_len) {
-                        if (active->cyclic) {
-                                active->num_completed = 0; /* wrap around */
-                        } else {
-                                list_del(&active->vdesc.node);
-                                vchan_cookie_complete(&active->vdesc);
-                                active = axi_dmac_active_desc(chan);
+                        if (active->num_completed == active->num_sgs ||
+                            sg->partial_len) {
+                                if (active->cyclic) {
+                                        active->num_completed = 0; /* wrap around */
+                                } else {
+                                        list_del(&active->vdesc.node);
+                                        vchan_cookie_complete(&active->vdesc);
+                                        active = axi_dmac_active_desc(chan);
+                                }
                         }
-                }
-        } while (active);
+                } while (active);
+        }
 
         return start_next;
 }
@@ -467,8 +514,12 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
         struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
         struct axi_dmac *dmac = chan_to_axi_dmac(chan);
         unsigned long flags;
+        u32 ctrl = AXI_DMAC_CTRL_ENABLE;
 
-        axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);
+        if (chan->hw_sg)
+                ctrl |= AXI_DMAC_CTRL_ENABLE_SG;
+
+        axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, ctrl);
 
         spin_lock_irqsave(&chan->vchan.lock, flags);
         if (vchan_issue_pending(&chan->vchan))
@@ -476,22 +527,58 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
         spin_unlock_irqrestore(&chan->vchan.lock, flags);
 }
 
-static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
+static struct axi_dmac_desc *
+axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
 {
+        struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+        struct device *dev = dmac->dma_dev.dev;
+        struct axi_dmac_hw_desc *hws;
         struct axi_dmac_desc *desc;
+        dma_addr_t hw_phys;
         unsigned int i;
 
         desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
         if (!desc)
                 return NULL;
 
         desc->num_sgs = num_sgs;
+        desc->chan = chan;
+
+        hws = dma_alloc_coherent(dev, PAGE_ALIGN(num_sgs * sizeof(*hws)),
+                                 &hw_phys, GFP_ATOMIC);
+        if (!hws) {
+                kfree(desc);
+                return NULL;
+        }
 
-        for (i = 0; i < num_sgs; i++)
-                desc->sg[i].id = AXI_DMAC_SG_UNUSED;
+        for (i = 0; i < num_sgs; i++) {
+                desc->sg[i].hw = &hws[i];
+                desc->sg[i].hw_phys = hw_phys + i * sizeof(*hws);
+
+                hws[i].id = AXI_DMAC_SG_UNUSED;
+                hws[i].flags = 0;
+
+                /* Link hardware descriptors */
+                hws[i].next_sg_addr = hw_phys + (i + 1) * sizeof(*hws);
+        }
+
+        /* The last hardware descriptor will trigger an interrupt */
+        desc->sg[num_sgs - 1].hw->flags = AXI_DMAC_HW_FLAG_LAST | AXI_DMAC_HW_FLAG_IRQ;
 
         return desc;
 }
 
+static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
+{
+        struct axi_dmac *dmac = chan_to_axi_dmac(desc->chan);
+        struct device *dev = dmac->dma_dev.dev;
+        struct axi_dmac_hw_desc *hw = desc->sg[0].hw;
+        dma_addr_t hw_phys = desc->sg[0].hw_phys;
+
+        dma_free_coherent(dev, PAGE_ALIGN(desc->num_sgs * sizeof(*hw)),
+                          hw, hw_phys);
+        kfree(desc);
+}
+
 static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
         enum dma_transfer_direction direction, dma_addr_t addr,
         unsigned int num_periods, unsigned int period_len,
@@ -508,26 +595,24 @@ static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
         segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;
 
         for (i = 0; i < num_periods; i++) {
-                len = period_len;
-
-                while (len > segment_size) {
+                for (len = period_len; len > segment_size; sg++) {
                         if (direction == DMA_DEV_TO_MEM)
-                                sg->dest_addr = addr;
+                                sg->hw->dest_addr = addr;
                         else
-                                sg->src_addr = addr;
-                        sg->x_len = segment_size;
-                        sg->y_len = 1;
-                        sg++;
+                                sg->hw->src_addr = addr;
+                        sg->hw->x_len = segment_size - 1;
+                        sg->hw->y_len = 0;
+                        sg->hw->flags = 0;
                         addr += segment_size;
                         len -= segment_size;
                 }
 
                 if (direction == DMA_DEV_TO_MEM)
-                        sg->dest_addr = addr;
+                        sg->hw->dest_addr = addr;
                 else
-                        sg->src_addr = addr;
-                sg->x_len = len;
-                sg->y_len = 1;
+                        sg->hw->src_addr = addr;
+                sg->hw->x_len = len - 1;
+                sg->hw->y_len = 0;
                 sg++;
                 addr += len;
         }
@@ -554,7 +639,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
         for_each_sg(sgl, sg, sg_len, i)
                 num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);
 
-        desc = axi_dmac_alloc_desc(num_sgs);
+        desc = axi_dmac_alloc_desc(chan, num_sgs);
         if (!desc)
                 return NULL;
 
@@ -563,7 +648,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
         for_each_sg(sgl, sg, sg_len, i) {
                 if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
                     !axi_dmac_check_len(chan, sg_dma_len(sg))) {
-                        kfree(desc);
+                        axi_dmac_free_desc(desc);
                         return NULL;
                 }
 
@@ -583,7 +668,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
 {
         struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
         struct axi_dmac_desc *desc;
-        unsigned int num_periods, num_segments;
+        unsigned int num_periods, num_segments, num_sgs;
 
         if (direction != chan->direction)
                 return NULL;
@@ -597,11 +682,16 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
         num_periods = buf_len / period_len;
         num_segments = DIV_ROUND_UP(period_len, chan->max_length);
+        num_sgs = num_periods * num_segments;
 
-        desc = axi_dmac_alloc_desc(num_periods * num_segments);
+        desc = axi_dmac_alloc_desc(chan, num_sgs);
         if (!desc)
                 return NULL;
 
+        /* Chain the last descriptor to the first, and remove its "last" flag */
+        desc->sg[num_sgs - 1].hw->next_sg_addr = desc->sg[0].hw_phys;
+        desc->sg[num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_LAST;
+
         axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
                 period_len, desc->sg);
@@ -653,26 +743,26 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
                         return NULL;
         }
 
-        desc = axi_dmac_alloc_desc(1);
+        desc = axi_dmac_alloc_desc(chan, 1);
         if (!desc)
                 return NULL;
 
         if (axi_dmac_src_is_mem(chan)) {
-                desc->sg[0].src_addr = xt->src_start;
-                desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
+                desc->sg[0].hw->src_addr = xt->src_start;
+                desc->sg[0].hw->src_stride = xt->sgl[0].size + src_icg;
         }
 
         if (axi_dmac_dest_is_mem(chan)) {
-                desc->sg[0].dest_addr = xt->dst_start;
-                desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
+                desc->sg[0].hw->dest_addr = xt->dst_start;
+                desc->sg[0].hw->dst_stride = xt->sgl[0].size + dst_icg;
         }
 
         if (chan->hw_2d) {
-                desc->sg[0].x_len = xt->sgl[0].size;
-                desc->sg[0].y_len = xt->numf;
+                desc->sg[0].hw->x_len = xt->sgl[0].size - 1;
+                desc->sg[0].hw->y_len = xt->numf - 1;
         } else {
-                desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
-                desc->sg[0].y_len = 1;
+                desc->sg[0].hw->x_len = xt->sgl[0].size * xt->numf - 1;
+                desc->sg[0].hw->y_len = 0;
         }
 
         if (flags & DMA_CYCLIC)
@@ -688,7 +778,7 @@ static void axi_dmac_free_chan_resources(struct dma_chan *c)
 
 static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
 {
-        kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
+        axi_dmac_free_desc(to_axi_dmac_desc(vdesc));
 }
 
 static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
@@ -714,6 +804,9 @@ static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
         case AXI_DMAC_REG_CURRENT_DEST_ADDR:
         case AXI_DMAC_REG_PARTIAL_XFER_LEN:
         case AXI_DMAC_REG_PARTIAL_XFER_ID:
+        case AXI_DMAC_REG_CURRENT_SG_ID:
+        case AXI_DMAC_REG_SG_ADDRESS:
+        case AXI_DMAC_REG_SG_ADDRESS_HIGH:
                 return true;
         default:
                 return false;
@@ -866,6 +959,10 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
         if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
                 chan->hw_cyclic = true;
 
+        axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, 0xffffffff);
+        if (axi_dmac_read(dmac, AXI_DMAC_REG_SG_ADDRESS))
+                chan->hw_sg = true;
+
         axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
         if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
                 chan->hw_2d = true;
@@ -911,6 +1008,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
         struct axi_dmac *dmac;
         struct regmap *regmap;
         unsigned int version;
+        u32 irq_mask = 0;
         int ret;
 
         dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@@ -966,6 +1064,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
         dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
         dma_dev->directions = BIT(dmac->chan.direction);
         dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+        dma_dev->max_sg_burst = 31; /* 31 SGs maximum in one burst */
         INIT_LIST_HEAD(&dma_dev->channels);
 
         dmac->chan.vchan.desc_free = axi_dmac_desc_free;
@@ -977,7 +1076,10 @@ static int axi_dmac_probe(struct platform_device *pdev)
         dma_dev->copy_align = (dmac->chan.address_align_mask + 1);
 
-        axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
+        if (dmac->chan.hw_sg)
+                irq_mask |= AXI_DMAC_IRQ_SOT;
+
+        axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, irq_mask);
 
         if (of_dma_is_coherent(pdev->dev.of_node)) {
                 ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);
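The axi-dmac rework above is exercised through the generic dmaengine client API rather than called directly. As a rough illustration only (the helper name and parameters below are hypothetical and not part of this merge), a peripheral driver would request one of the cyclic transfers built by axi_dmac_prep_dma_cyclic() roughly as follows; leaving the completion callback unset is what lets the driver suppress per-period interrupts, either via hardware cyclic mode or, with the new scatter-gather support, by clearing AXI_DMAC_HW_FLAG_IRQ on the last descriptor.

#include <linux/dmaengine.h>
#include <linux/errno.h>

/*
 * Hypothetical dmaengine client helper (illustration only, not part of this
 * merge): start a cyclic DEV_TO_MEM transfer on a channel that was obtained
 * with dma_request_chan() and configured with dmaengine_slave_config().
 */
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
                                size_t buf_len, size_t period_len,
                                dma_async_tx_callback cb, void *cb_arg)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                         DMA_DEV_TO_MEM,
                                         cb ? DMA_PREP_INTERRUPT : 0);
        if (!desc)
                return -ENOMEM;

        /* A NULL callback allows axi-dmac to avoid per-period interrupts. */
        desc->callback = cb;
        desc->callback_param = cb_arg;

        cookie = dmaengine_submit(desc);        /* queue on the virtual channel */
        if (dma_submit_error(cookie))
                return -EIO;

        dma_async_issue_pending(chan);          /* ends up in the driver's issue_pending */
        return 0;
}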