author | Dave Jiang <dave.jiang@intel.com> | 2016-07-20 22:14:13 +0200
---|---|---
committer | Vinod Koul <vinod.koul@intel.com> | 2016-08-08 04:41:42 +0200
commit | 72203572afd7aef243c182f19925e5a77a1dc6a1 |
tree | a8d97950881d84f5ecf0f803eb25a28ed8c485b8 /drivers |
parent | ntb: add DMA error handling for TX DMA |
ntb: add DMA error handling for RX DMA
Add support on the RX DMA path to recover when the DMA engine reports an
error status and aborts all subsequent operations. Failed or aborted
descriptors are completed with a CPU memcpy fallback instead.
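
As an illustration only (not part of this patch), the error-handling pattern
relied on here looks roughly like the sketch below: a dmaengine result
callback inspects the reported status and redoes the copy with the CPU when
the hardware transfer failed or was aborted. The example_* names are
hypothetical stand-ins for struct ntb_queue_entry, ntb_memcpy_rx() and
ntb_complete_rxc().

/*
 * Illustrative sketch only; "example_*" names are hypothetical.
 * Mirrors the shape of ntb_rx_copy_callback() in this patch.
 */
#include <linux/dmaengine.h>
#include <linux/string.h>

struct example_rx_entry {
	void *dst;		/* local receive buffer */
	const void *src;	/* frame in the shared-memory RX window */
	size_t len;
	unsigned int errors;	/* counts hard DMA failures */
};

static void example_rx_complete(struct example_rx_entry *entry)
{
	/* hand the buffer back to the client, advance the RX ring, etc. */
}

/* dma_async_tx_callback_result handler installed via txd->callback_result */
static void example_rx_done(void *param, const struct dmaengine_result *res)
{
	struct example_rx_entry *entry = param;

	/* res is NULL when the copy was done by the CPU rather than by DMA */
	if (res) {
		switch (res->result) {
		case DMA_TRANS_READ_FAILED:
		case DMA_TRANS_WRITE_FAILED:
			entry->errors++;
			/* fall through */
		case DMA_TRANS_ABORTED:
			/* hardware copy failed or was flushed: redo by hand */
			memcpy(entry->dst, entry->src, entry->len);
			break;
		case DMA_TRANS_NOERROR:
		default:
			break;
		}
	}

	example_rx_complete(entry);
}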
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Allen Hubbe <Allen.Hubbe@emc.com>
Cc: Jon Mason <jdmason@kudzu.us>
Cc: linux-ntb@googlegroups.com
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/ntb/ntb_transport.c | 83
1 file changed, 67 insertions(+), 16 deletions(-)
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index e61db11e6ccc..8601c10acf74 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -105,13 +105,13 @@ struct ntb_queue_entry {
 	int retries;
 	int errors;
 	unsigned int tx_index;
+	unsigned int rx_index;
 
 	struct ntb_transport_qp *qp;
 	union {
 		struct ntb_payload_header __iomem *tx_hdr;
 		struct ntb_payload_header *rx_hdr;
 	};
-	unsigned int index;
 };
 
 struct ntb_rx_info {
@@ -265,6 +265,9 @@ static struct ntb_client ntb_transport_client;
 static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
 			       struct ntb_queue_entry *entry);
 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
+static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset);
+static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);
+
 
 static int ntb_transport_bus_match(struct device *dev,
 				   struct device_driver *drv)
@@ -1235,7 +1238,7 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp)
 			break;
 
 		entry->rx_hdr->flags = 0;
-		iowrite32(entry->index, &qp->rx_info->entry);
+		iowrite32(entry->rx_index, &qp->rx_info->entry);
 
 		cb_data = entry->cb_data;
 		len = entry->len;
@@ -1253,10 +1256,36 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp)
 	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
 }
 
-static void ntb_rx_copy_callback(void *data)
+static void ntb_rx_copy_callback(void *data,
+				 const struct dmaengine_result *res)
 {
 	struct ntb_queue_entry *entry = data;
 
+	/* we need to check DMA results if we are using DMA */
+	if (res) {
+		enum dmaengine_tx_result dma_err = res->result;
+
+		switch (dma_err) {
+		case DMA_TRANS_READ_FAILED:
+		case DMA_TRANS_WRITE_FAILED:
+			entry->errors++;
+		case DMA_TRANS_ABORTED:
+		{
+			struct ntb_transport_qp *qp = entry->qp;
+			void *offset = qp->rx_buff + qp->rx_max_frame *
+					qp->rx_index;
+
+			ntb_memcpy_rx(entry, offset);
+			qp->rx_memcpy++;
+			return;
+		}
+
+		case DMA_TRANS_NOERROR:
+		default:
+			break;
+		}
+	}
+
 	entry->flags |= DESC_DONE_FLAG;
 
 	ntb_complete_rxc(entry->qp);
@@ -1272,10 +1301,10 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
 	/* Ensure that the data is fully copied out before clearing the flag */
 	wmb();
 
-	ntb_rx_copy_callback(entry);
+	ntb_rx_copy_callback(entry, NULL);
 }
 
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
+static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
 {
 	struct dma_async_tx_descriptor *txd;
 	struct ntb_transport_qp *qp = entry->qp;
@@ -1288,13 +1317,6 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 	int retries = 0;
 
 	len = entry->len;
-
-	if (!chan)
-		goto err;
-
-	if (len < copy_bytes)
-		goto err;
-
 	device = chan->device;
 	pay_off = (size_t)offset & ~PAGE_MASK;
 	buff_off = (size_t)buf & ~PAGE_MASK;
@@ -1322,7 +1344,8 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 	unmap->from_cnt = 1;
 
 	for (retries = 0; retries < DMA_RETRIES; retries++) {
-		txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+		txd = device->device_prep_dma_memcpy(chan,
+						     unmap->addr[1],
 						     unmap->addr[0], len,
 						     DMA_PREP_INTERRUPT);
 		if (txd)
@@ -1337,7 +1360,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 		goto err_get_unmap;
 	}
 
-	txd->callback = ntb_rx_copy_callback;
+	txd->callback_result = ntb_rx_copy_callback;
 	txd->callback_param = entry;
 	dma_set_unmap(txd, unmap);
 
@@ -1351,13 +1374,38 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 
 	qp->rx_async++;
 
-	return;
+	return 0;
 
 err_set_unmap:
 	dmaengine_unmap_put(unmap);
 err_get_unmap:
 	dmaengine_unmap_put(unmap);
 err:
+	return -ENXIO;
+}
+
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
+{
+	struct ntb_transport_qp *qp = entry->qp;
+	struct dma_chan *chan = qp->rx_dma_chan;
+	int res;
+
+	if (!chan)
+		goto err;
+
+	if (entry->len < copy_bytes)
+		goto err;
+
+	res = ntb_async_rx_submit(entry, offset);
+	if (res < 0)
+		goto err;
+
+	if (!entry->retries)
+		qp->rx_async++;
+
+	return;
+
+err:
 	ntb_memcpy_rx(entry, offset);
 	qp->rx_memcpy++;
 }
@@ -1403,7 +1451,7 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
 	}
 
 	entry->rx_hdr = hdr;
-	entry->index = qp->rx_index;
+	entry->rx_index = qp->rx_index;
 
 	if (hdr->len > entry->len) {
 		dev_dbg(&qp->ndev->pdev->dev,
@@ -1981,6 +2029,9 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
 	entry->buf = data;
 	entry->len = len;
 	entry->flags = 0;
+	entry->retries = 0;
+	entry->errors = 0;
+	entry->rx_index = 0;
 
 	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
 
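
For completeness, a condensed, hypothetical sketch of how such a descriptor
gets wired to the result callback on the submit side. The helper name and
error codes are assumptions, not the patch; DMA mapping, unmap bookkeeping
and the retry loop of the real ntb_async_rx_submit() are omitted.

#include <linux/dmaengine.h>

/* Hypothetical helper mirroring the shape of ntb_async_rx_submit(). */
static int example_rx_submit(struct dma_chan *chan, void *param,
			     dma_addr_t dst, dma_addr_t src, size_t len,
			     dma_async_tx_callback_result done)
{
	struct dma_device *device = chan->device;
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = device->device_prep_dma_memcpy(chan, dst, src, len,
					      DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	/* report completion status through ->callback_result, not ->callback */
	txd->callback_result = done;
	txd->callback_param = param;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}

A caller would treat any negative return as "DMA unavailable" and fall back
to a plain CPU copy, which is what the reworked ntb_async_rx() does with
ntb_memcpy_rx().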