author     Vincent Whitchurch <vincent.whitchurch@axis.com>  2022-09-27 13:21:15 +0200
committer  Mark Brown <broonie@kernel.org>                   2022-09-28 13:54:05 +0200
commit     0c17ba73c08ff2690c1eff8df374b6709eed55ce
tree       b0c60feb122a9b47c76ab367a0263860cfd70390 /drivers/spi
parent     spi: Save current RX and TX DMA devices
spi: Fix cache corruption due to DMA/PIO overlap
The SPI core DMA mapping support performs cache management once for the entire message and not between transfers, and this leads to cache corruption if a message has two or more RX transfers with both transfers targeting the same cache line, and the controller driver decides to handle one using DMA and the other using PIO (for example, because one is much larger than the other).

Fix it by syncing before/after the actual transfers. This also means that we can skip the sync during the map/unmap of the message.

Fixes: 99adef310f68 ("spi: Provide core support for DMA mapping transfers")
Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
Link: https://lore.kernel.org/r/20220927112117.77599-3-vincent.whitchurch@axis.com
Signed-off-by: Mark Brown <broonie@kernel.org>
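To make the failure mode concrete, here is a hypothetical driver-side sketch (not from this patch; the struct and buffer names are made up for illustration) of a message that can trigger the corruption, assuming <linux/spi/spi.h>, <linux/slab.h>, and a valid struct spi_device *spi:

	struct rsp {
		u8 status[4];	/* small RX transfer: a driver may pick PIO */
		u8 data[512];	/* large RX transfer: a driver may pick DMA */
	} *rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);

	struct spi_transfer xfers[] = {
		{ .rx_buf = rsp->status, .len = sizeof(rsp->status) },
		{ .rx_buf = rsp->data,   .len = sizeof(rsp->data) },
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
	spi_sync(spi, &msg);

	/*
	 * rsp->status and the start of rsp->data sit in the same cache
	 * line. With message-wide cache maintenance, the invalidate done
	 * for the DMA'd transfer can discard the PIO-written status bytes.
	 */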
Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/spi.c | 109
 1 file changed, 88 insertions(+), 21 deletions(-)
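Condensed, the pattern the patch moves to looks like this (a simplified sketch, not the literal kernel code; dev and sgt stand in for the controller's DMA device and a transfer's scatter/gather table):

	/* map once per message, skipping CPU cache maintenance */
	ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);

	/* before each transfer: hand the buffer to the device */
	dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
	/* ... the transfer runs (DMA or PIO) ... */
	/* after each transfer: hand the buffer back to the CPU */
	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);

	/* unmap once per message; the syncs were already done per transfer */
	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);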
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a492a2a16fed..3b7efdd6a694 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1010,9 +1010,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
}
#ifdef CONFIG_HAS_DMA
-int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
- struct sg_table *sgt, void *buf, size_t len,
- enum dma_data_direction dir)
+static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir, unsigned long attrs)
{
const bool vmalloced_buf = is_vmalloc_addr(buf);
unsigned int max_seg_size = dma_get_max_seg_size(dev);
@@ -1078,28 +1078,39 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
sg = sg_next(sg);
}
- ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
- if (!ret)
- ret = -ENOMEM;
+ ret = dma_map_sgtable(dev, sgt, dir, attrs);
if (ret < 0) {
sg_free_table(sgt);
return ret;
}
- sgt->nents = ret;
-
return 0;
}
-void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
- struct sg_table *sgt, enum dma_data_direction dir)
+int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir)
+{
+ return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
+}
+
+static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
+ struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir,
+ unsigned long attrs)
{
if (sgt->orig_nents) {
- dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+ dma_unmap_sgtable(dev, sgt, dir, attrs);
sg_free_table(sgt);
}
}
+void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
+}
+
static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
struct device *tx_dev, *rx_dev;
@@ -1124,24 +1135,30 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
rx_dev = ctlr->dev.parent;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* The sync is done before each transfer. */
+ unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
if (!ctlr->can_dma(ctlr, msg->spi, xfer))
continue;
if (xfer->tx_buf != NULL) {
- ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
- (void *)xfer->tx_buf, xfer->len,
- DMA_TO_DEVICE);
+ ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+ (void *)xfer->tx_buf,
+ xfer->len, DMA_TO_DEVICE,
+ attrs);
if (ret != 0)
return ret;
}
if (xfer->rx_buf != NULL) {
- ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
- xfer->rx_buf, xfer->len,
- DMA_FROM_DEVICE);
+ ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+ xfer->rx_buf, xfer->len,
+ DMA_FROM_DEVICE, attrs);
if (ret != 0) {
- spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
- DMA_TO_DEVICE);
+ spi_unmap_buf_attrs(ctlr, tx_dev,
+ &xfer->tx_sg, DMA_TO_DEVICE,
+ attrs);
+
return ret;
}
}
@@ -1164,17 +1181,52 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
return 0;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* The sync has already been done after each transfer. */
+ unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
if (!ctlr->can_dma(ctlr, msg->spi, xfer))
continue;
- spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
- spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+ DMA_FROM_DEVICE, attrs);
+ spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+ DMA_TO_DEVICE, attrs);
}
ctlr->cur_msg_mapped = false;
return 0;
}
+
+static void spi_dma_sync_for_device(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ struct device *rx_dev = ctlr->cur_rx_dma_dev;
+ struct device *tx_dev = ctlr->cur_tx_dma_dev;
+
+ if (!ctlr->cur_msg_mapped)
+ return;
+
+ if (xfer->tx_sg.orig_nents)
+ dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ if (xfer->rx_sg.orig_nents)
+ dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+}
+
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ struct device *rx_dev = ctlr->cur_rx_dma_dev;
+ struct device *tx_dev = ctlr->cur_tx_dma_dev;
+
+ if (!ctlr->cur_msg_mapped)
+ return;
+
+ if (xfer->rx_sg.orig_nents)
+ dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+ if (xfer->tx_sg.orig_nents)
+ dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
struct spi_message *msg)
@@ -1187,6 +1239,16 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
{
return 0;
}
+
+static void spi_dma_sync_for_device(struct spi_controller *ctrl,
+ struct spi_transfer *xfer)
+{
+}
+
+static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
+ struct spi_transfer *xfer)
+{
+}
#endif /* !CONFIG_HAS_DMA */
static inline int spi_unmap_msg(struct spi_controller *ctlr,
@@ -1445,8 +1507,11 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
reinit_completion(&ctlr->xfer_completion);
fallback_pio:
+ spi_dma_sync_for_device(ctlr, xfer);
ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
if (ret < 0) {
+ spi_dma_sync_for_cpu(ctlr, xfer);
+
if (ctlr->cur_msg_mapped &&
(xfer->error & SPI_TRANS_FAIL_NO_START)) {
__spi_unmap_msg(ctlr, msg);
@@ -1469,6 +1534,8 @@ fallback_pio:
if (ret < 0)
msg->status = ret;
}
+
+ spi_dma_sync_for_cpu(ctlr, xfer);
} else {
if (xfer->len)
dev_err(&msg->spi->dev,
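One design note on the last hunk as shown here: spi_dma_sync_for_device() and spi_dma_sync_for_cpu() bracket ctlr->transfer_one(), so buffers are handed to the device only for the duration of each transfer. When transfer_one() fails with SPI_TRANS_FAIL_NO_START, the error path syncs back to the CPU and calls __spi_unmap_msg(), which clears ctlr->cur_msg_mapped; if the transfer is then retried at the fallback_pio label, the spi_dma_sync_for_device() call there is a no-op and the PIO retry operates on CPU-coherent buffers.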