Diffstat (limited to 'drivers/dma/dw-edma')
-rw-r--r-- drivers/dma/dw-edma/dw-edma-core.c       | 178
-rw-r--r-- drivers/dma/dw-edma/dw-edma-core.h       |  37
-rw-r--r-- drivers/dma/dw-edma/dw-edma-pcie.c       | 277
-rw-r--r-- drivers/dma/dw-edma/dw-edma-v0-core.c    | 300
-rw-r--r-- drivers/dma/dw-edma/dw-edma-v0-core.h    |   2
-rw-r--r-- drivers/dma/dw-edma/dw-edma-v0-debugfs.c |  77
-rw-r--r-- drivers/dma/dw-edma/dw-edma-v0-debugfs.h |   4
-rw-r--r-- drivers/dma/dw-edma/dw-edma-v0-regs.h    | 291
8 files changed, 811 insertions(+), 355 deletions(-)
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 08d71dafa001..53289927dd0d 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -81,8 +81,13 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
* - Even chunks originate CB equal to 1
*/
chunk->cb = !(desc->chunks_alloc % 2);
- chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off;
- chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off;
+ if (chan->dir == EDMA_DIR_WRITE) {
+ chunk->ll_region.paddr = dw->ll_region_wr[chan->id].paddr;
+ chunk->ll_region.vaddr = dw->ll_region_wr[chan->id].vaddr;
+ } else {
+ chunk->ll_region.paddr = dw->ll_region_rd[chan->id].paddr;
+ chunk->ll_region.vaddr = dw->ll_region_rd[chan->id].vaddr;
+ }
if (desc->chunk) {
/* Create and add new element into the linked list */
@@ -329,22 +334,22 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
struct dw_edma_chunk *chunk;
struct dw_edma_burst *burst;
struct dw_edma_desc *desc;
- u32 cnt;
+ u32 cnt = 0;
int i;
if (!chan->configured)
return NULL;
switch (chan->config.direction) {
- case DMA_DEV_TO_MEM: /* local dma */
+ case DMA_DEV_TO_MEM: /* local DMA */
if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
break;
return NULL;
- case DMA_MEM_TO_DEV: /* local dma */
+ case DMA_MEM_TO_DEV: /* local DMA */
if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
break;
return NULL;
- default: /* remote dma */
+ default: /* remote DMA */
if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
break;
if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
@@ -352,12 +357,19 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
return NULL;
}
- if (xfer->cyclic) {
+ if (xfer->type == EDMA_XFER_CYCLIC) {
if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
return NULL;
- } else {
+ } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
if (xfer->xfer.sg.len < 1)
return NULL;
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
+ if (!xfer->xfer.il->numf)
+ return NULL;
+ if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0)
+ return NULL;
+ } else {
+ return NULL;
}
desc = dw_edma_alloc_desc(chan);
@@ -368,18 +380,28 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
if (unlikely(!chunk))
goto err_alloc;
- src_addr = chan->config.src_addr;
- dst_addr = chan->config.dst_addr;
+ if (xfer->type == EDMA_XFER_INTERLEAVED) {
+ src_addr = xfer->xfer.il->src_start;
+ dst_addr = xfer->xfer.il->dst_start;
+ } else {
+ src_addr = chan->config.src_addr;
+ dst_addr = chan->config.dst_addr;
+ }
- if (xfer->cyclic) {
+ if (xfer->type == EDMA_XFER_CYCLIC) {
cnt = xfer->xfer.cyclic.cnt;
- } else {
+ } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
cnt = xfer->xfer.sg.len;
sg = xfer->xfer.sg.sgl;
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
+ if (xfer->xfer.il->numf > 0)
+ cnt = xfer->xfer.il->numf;
+ else
+ cnt = xfer->xfer.il->frame_size;
}
for (i = 0; i < cnt; i++) {
- if (!xfer->cyclic && !sg)
+ if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
break;
if (chunk->bursts_alloc == chan->ll_max) {
@@ -392,20 +414,23 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
if (unlikely(!burst))
goto err_alloc;
- if (xfer->cyclic)
+ if (xfer->type == EDMA_XFER_CYCLIC)
burst->sz = xfer->xfer.cyclic.len;
- else
+ else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
burst->sz = sg_dma_len(sg);
+ else if (xfer->type == EDMA_XFER_INTERLEAVED)
+ burst->sz = xfer->xfer.il->sgl[i].size;
chunk->ll_region.sz += burst->sz;
desc->alloc_sz += burst->sz;
if (chan->dir == EDMA_DIR_WRITE) {
burst->sar = src_addr;
- if (xfer->cyclic) {
+ if (xfer->type == EDMA_XFER_CYCLIC) {
burst->dar = xfer->xfer.cyclic.paddr;
- } else {
- burst->dar = dst_addr;
+ } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
+ src_addr += sg_dma_len(sg);
+ burst->dar = sg_dma_address(sg);
/* Unlike the typical assumption by other
* drivers/IPs the peripheral memory isn't
* a FIFO memory, in this case, it's a
@@ -416,10 +441,11 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
}
} else {
burst->dar = dst_addr;
- if (xfer->cyclic) {
+ if (xfer->type == EDMA_XFER_CYCLIC) {
burst->sar = xfer->xfer.cyclic.paddr;
- } else {
- burst->sar = src_addr;
+ } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
+ dst_addr += sg_dma_len(sg);
+ burst->sar = sg_dma_address(sg);
/* Unlike the typical assumption by other
* drivers/IPs the peripheral memory isn't
* a FIFO memory, in this case, it's a
@@ -430,10 +456,22 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
}
}
- if (!xfer->cyclic) {
- src_addr += sg_dma_len(sg);
- dst_addr += sg_dma_len(sg);
+ if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
sg = sg_next(sg);
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED &&
+ xfer->xfer.il->frame_size > 0) {
+ struct dma_interleaved_template *il = xfer->xfer.il;
+ struct data_chunk *dc = &il->sgl[i];
+
+ if (il->src_sgl) {
+ src_addr += burst->sz;
+ src_addr += dmaengine_get_src_icg(il, dc);
+ }
+
+ if (il->dst_sgl) {
+ dst_addr += burst->sz;
+ dst_addr += dmaengine_get_dst_icg(il, dc);
+ }
}
}
@@ -459,7 +497,7 @@ dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
xfer.xfer.sg.sgl = sgl;
xfer.xfer.sg.len = len;
xfer.flags = flags;
- xfer.cyclic = false;
+ xfer.type = EDMA_XFER_SCATTER_GATHER;
return dw_edma_device_transfer(&xfer);
}
@@ -478,7 +516,23 @@ dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
xfer.xfer.cyclic.len = len;
xfer.xfer.cyclic.cnt = count;
xfer.flags = flags;
- xfer.cyclic = true;
+ xfer.type = EDMA_XFER_CYCLIC;
+
+ return dw_edma_device_transfer(&xfer);
+}
+
+static struct dma_async_tx_descriptor *
+dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
+ struct dma_interleaved_template *ilt,
+ unsigned long flags)
+{
+ struct dw_edma_transfer xfer;
+
+ xfer.dchan = dchan;
+ xfer.direction = ilt->dir;
+ xfer.xfer.il = ilt;
+ xfer.flags = flags;
+ xfer.type = EDMA_XFER_INTERLEAVED;
return dw_edma_device_transfer(&xfer);
}
@@ -642,24 +696,13 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
struct device *dev = chip->dev;
struct dw_edma *dw = chip->dw;
struct dw_edma_chan *chan;
- size_t ll_chunk, dt_chunk;
struct dw_edma_irq *irq;
struct dma_device *dma;
- u32 i, j, cnt, ch_cnt;
u32 alloc, off_alloc;
+ u32 i, j, cnt;
int err = 0;
u32 pos;
- ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
- ll_chunk = dw->ll_region.sz;
- dt_chunk = dw->dt_region.sz;
-
- /* Calculate linked list chunk for each channel */
- ll_chunk /= roundup_pow_of_two(ch_cnt);
-
- /* Calculate linked list chunk for each channel */
- dt_chunk /= roundup_pow_of_two(ch_cnt);
-
if (write) {
i = 0;
cnt = dw->wr_ch_cnt;
@@ -691,14 +734,14 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
chan->request = EDMA_REQ_NONE;
chan->status = EDMA_ST_IDLE;
- chan->ll_off = (ll_chunk * i);
- chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1;
-
- chan->dt_off = (dt_chunk * i);
+ if (write)
+ chan->ll_max = (dw->ll_region_wr[j].sz / EDMA_LL_SZ);
+ else
+ chan->ll_max = (dw->ll_region_rd[j].sz / EDMA_LL_SZ);
+ chan->ll_max -= 1;
- dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n",
- write ? "write" : "read", j,
- chan->ll_off, chan->ll_max);
+ dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
+ write ? "write" : "read", j, chan->ll_max);
if (dw->nr_irqs == 1)
pos = 0;
@@ -723,12 +766,15 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
chan->vc.desc_free = vchan_free_desc;
vchan_init(&chan->vc, dma);
- dt_region->paddr = dw->dt_region.paddr + chan->dt_off;
- dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off;
- dt_region->sz = dt_chunk;
-
- dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n",
- write ? "write" : "read", j, chan->dt_off);
+ if (write) {
+ dt_region->paddr = dw->dt_region_wr[j].paddr;
+ dt_region->vaddr = dw->dt_region_wr[j].vaddr;
+ dt_region->sz = dw->dt_region_wr[j].sz;
+ } else {
+ dt_region->paddr = dw->dt_region_rd[j].paddr;
+ dt_region->vaddr = dw->dt_region_rd[j].vaddr;
+ dt_region->sz = dw->dt_region_rd[j].sz;
+ }
dw_edma_v0_core_device_config(chan);
}
@@ -738,6 +784,7 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
dma_cap_set(DMA_SLAVE, dma->cap_mask);
dma_cap_set(DMA_CYCLIC, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
@@ -756,6 +803,7 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
dma->device_tx_status = dw_edma_device_tx_status;
dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
+ dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma;
dma_set_max_seg_size(dma->dev, U32_MAX);
@@ -863,14 +911,15 @@ int dw_edma_probe(struct dw_edma_chip *chip)
raw_spin_lock_init(&dw->lock);
- /* Find out how many write channels are supported by hardware */
- dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE);
- if (!dw->wr_ch_cnt)
- return -EINVAL;
+ dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt,
+ dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
+ dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);
- /* Find out how many read channels are supported by hardware */
- dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ);
- if (!dw->rd_ch_cnt)
+ dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt,
+ dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
+ dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);
+
+ if (!dw->wr_ch_cnt && !dw->rd_ch_cnt)
return -EINVAL;
dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
@@ -937,24 +986,23 @@ int dw_edma_remove(struct dw_edma_chip *chip)
/* Power management */
pm_runtime_disable(dev);
+ /* Deregister eDMA device */
+ dma_async_device_unregister(&dw->wr_edma);
list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
vc.chan.device_node) {
- list_del(&chan->vc.chan.device_node);
tasklet_kill(&chan->vc.task);
+ list_del(&chan->vc.chan.device_node);
}
+ dma_async_device_unregister(&dw->rd_edma);
list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
vc.chan.device_node) {
- list_del(&chan->vc.chan.device_node);
tasklet_kill(&chan->vc.task);
+ list_del(&chan->vc.chan.device_node);
}
- /* Deregister eDMA device */
- dma_async_device_unregister(&dw->wr_edma);
- dma_async_device_unregister(&dw->rd_edma);
-
/* Turn debugfs off */
- dw_edma_v0_core_debugfs_off();
+ dw_edma_v0_core_debugfs_off(chip);
return 0;
}
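
Note: for reference, a minimal sketch of how a dmaengine client could reach the new interleaved path through the generic API. The helper name and transfer geometry are hypothetical; per the checks added to dw_edma_device_transfer() above, the template must have a non-zero numf and a zero frame_size.

#include <linux/dmaengine.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/slab.h>

/* Hypothetical client helper: submit one interleaved transfer, which
 * the dmaengine core routes into dw_edma_device_prep_interleaved_dma().
 * The template is consumed during prep, so it can be freed afterwards. */
static int example_submit_interleaved(struct dma_chan *chan,
				      dma_addr_t src, dma_addr_t dst)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_interleaved_template *xt;

	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_DEV;
	xt->numf = 1;			/* one frame ... */
	xt->frame_size = 0;		/* ... no per-frame chunk list */
	xt->sgl[0].size = SZ_1K;	/* burst size of the frame */
	xt->sgl[0].icg = 0;

	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}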
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index 31fc50d31792..60316d408c3e 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -15,15 +15,18 @@
#include "../virt-dma.h"
#define EDMA_LL_SZ 24
+#define EDMA_MAX_WR_CH 8
+#define EDMA_MAX_RD_CH 8
enum dw_edma_dir {
EDMA_DIR_WRITE = 0,
EDMA_DIR_READ
};
-enum dw_edma_mode {
- EDMA_MODE_LEGACY = 0,
- EDMA_MODE_UNROLL
+enum dw_edma_map_format {
+ EDMA_MF_EDMA_LEGACY = 0x0,
+ EDMA_MF_EDMA_UNROLL = 0x1,
+ EDMA_MF_HDMA_COMPAT = 0x5
};
enum dw_edma_request {
@@ -38,6 +41,12 @@ enum dw_edma_status {
EDMA_ST_BUSY
};
+enum dw_edma_xfer_type {
+ EDMA_XFER_SCATTER_GATHER = 0,
+ EDMA_XFER_CYCLIC,
+ EDMA_XFER_INTERLEAVED
+};
+
struct dw_edma_chan;
struct dw_edma_chunk;
@@ -82,11 +91,8 @@ struct dw_edma_chan {
int id;
enum dw_edma_dir dir;
- off_t ll_off;
u32 ll_max;
- off_t dt_off;
-
struct msi_msg msi;
enum dw_edma_request request;
@@ -117,19 +123,23 @@ struct dw_edma {
u16 rd_ch_cnt;
struct dw_edma_region rg_region; /* Registers */
- struct dw_edma_region ll_region; /* Linked list */
- struct dw_edma_region dt_region; /* Data */
+ struct dw_edma_region ll_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region ll_region_rd[EDMA_MAX_RD_CH];
+ struct dw_edma_region dt_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region dt_region_rd[EDMA_MAX_RD_CH];
struct dw_edma_irq *irq;
int nr_irqs;
- u32 version;
- enum dw_edma_mode mode;
+ enum dw_edma_map_format mf;
struct dw_edma_chan *chan;
const struct dw_edma_core_ops *ops;
raw_spinlock_t lock; /* Only for legacy */
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif /* CONFIG_DEBUG_FS */
};
struct dw_edma_sg {
@@ -146,12 +156,13 @@ struct dw_edma_cyclic {
struct dw_edma_transfer {
struct dma_chan *dchan;
union dw_edma_xfer {
- struct dw_edma_sg sg;
- struct dw_edma_cyclic cyclic;
+ struct dw_edma_sg sg;
+ struct dw_edma_cyclic cyclic;
+ struct dma_interleaved_template *il;
} xfer;
enum dma_transfer_direction direction;
unsigned long flags;
- bool cyclic;
+ enum dw_edma_xfer_type type;
};
static inline
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 1eafc602e17e..44f6e09bdb53 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -13,45 +13,81 @@
#include <linux/dma/edma.h>
#include <linux/pci-epf.h>
#include <linux/msi.h>
+#include <linux/bitfield.h>
#include "dw-edma-core.h"
+#define DW_PCIE_VSEC_DMA_ID 0x6
+#define DW_PCIE_VSEC_DMA_BAR GENMASK(10, 8)
+#define DW_PCIE_VSEC_DMA_MAP GENMASK(2, 0)
+#define DW_PCIE_VSEC_DMA_WR_CH GENMASK(9, 0)
+#define DW_PCIE_VSEC_DMA_RD_CH GENMASK(25, 16)
+
+#define DW_BLOCK(a, b, c) \
+ { \
+ .bar = a, \
+ .off = b, \
+ .sz = c, \
+ },
+
+struct dw_edma_block {
+ enum pci_barno bar;
+ off_t off;
+ size_t sz;
+};
+
struct dw_edma_pcie_data {
/* eDMA registers location */
- enum pci_barno rg_bar;
- off_t rg_off;
- size_t rg_sz;
+ struct dw_edma_block rg;
/* eDMA memory linked list location */
- enum pci_barno ll_bar;
- off_t ll_off;
- size_t ll_sz;
+ struct dw_edma_block ll_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_block ll_rd[EDMA_MAX_RD_CH];
/* eDMA memory data location */
- enum pci_barno dt_bar;
- off_t dt_off;
- size_t dt_sz;
+ struct dw_edma_block dt_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_block dt_rd[EDMA_MAX_RD_CH];
/* Other */
- u32 version;
- enum dw_edma_mode mode;
+ enum dw_edma_map_format mf;
u8 irqs;
+ u16 wr_ch_cnt;
+ u16 rd_ch_cnt;
};
static const struct dw_edma_pcie_data snps_edda_data = {
/* eDMA registers location */
- .rg_bar = BAR_0,
- .rg_off = 0x00001000, /* 4 Kbytes */
- .rg_sz = 0x00002000, /* 8 Kbytes */
+ .rg.bar = BAR_0,
+ .rg.off = 0x00001000, /* 4 Kbytes */
+ .rg.sz = 0x00002000, /* 8 Kbytes */
/* eDMA memory linked list location */
- .ll_bar = BAR_2,
- .ll_off = 0x00000000, /* 0 Kbytes */
- .ll_sz = 0x00800000, /* 8 Mbytes */
+ .ll_wr = {
+ /* Channel 0 - BAR 2, offset 0 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00000000, 0x00000800)
+ /* Channel 1 - BAR 2, offset 2 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00200000, 0x00000800)
+ },
+ .ll_rd = {
+ /* Channel 0 - BAR 2, offset 4 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00400000, 0x00000800)
+ /* Channel 1 - BAR 2, offset 6 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00600000, 0x00000800)
+ },
/* eDMA memory data location */
- .dt_bar = BAR_2,
- .dt_off = 0x00800000, /* 8 Mbytes */
- .dt_sz = 0x03800000, /* 56 Mbytes */
+ .dt_wr = {
+ /* Channel 0 - BAR 2, offset 8 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00800000, 0x00000800)
+ /* Channel 1 - BAR 2, offset 9 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00900000, 0x00000800)
+ },
+ .dt_rd = {
+ /* Channel 0 - BAR 2, offset 10 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00a00000, 0x00000800)
+ /* Channel 1 - BAR 2, offset 11 Mbytes, size 2 Kbytes */
+ DW_BLOCK(BAR_2, 0x00b00000, 0x00000800)
+ },
/* Other */
- .version = 0,
- .mode = EDMA_MODE_UNROLL,
+ .mf = EDMA_MF_EDMA_UNROLL,
.irqs = 1,
+ .wr_ch_cnt = 2,
+ .rd_ch_cnt = 2,
};
static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
@@ -63,14 +99,58 @@ static const struct dw_edma_core_ops dw_edma_pcie_core_ops = {
.irq_vector = dw_edma_pcie_irq_vector,
};
+static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
+ struct dw_edma_pcie_data *pdata)
+{
+ u32 val, map;
+ u16 vsec;
+ u64 off;
+
+ vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_SYNOPSYS,
+ DW_PCIE_VSEC_DMA_ID);
+ if (!vsec)
+ return;
+
+ pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
+ if (PCI_VNDR_HEADER_REV(val) != 0x00 ||
+ PCI_VNDR_HEADER_LEN(val) != 0x18)
+ return;
+
+ pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability DMA\n");
+ pci_read_config_dword(pdev, vsec + 0x8, &val);
+ map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val);
+ if (map != EDMA_MF_EDMA_LEGACY &&
+ map != EDMA_MF_EDMA_UNROLL &&
+ map != EDMA_MF_HDMA_COMPAT)
+ return;
+
+ pdata->mf = map;
+ pdata->rg.bar = FIELD_GET(DW_PCIE_VSEC_DMA_BAR, val);
+
+ pci_read_config_dword(pdev, vsec + 0xc, &val);
+ pdata->wr_ch_cnt = min_t(u16, pdata->wr_ch_cnt,
+ FIELD_GET(DW_PCIE_VSEC_DMA_WR_CH, val));
+ pdata->rd_ch_cnt = min_t(u16, pdata->rd_ch_cnt,
+ FIELD_GET(DW_PCIE_VSEC_DMA_RD_CH, val));
+
+ pci_read_config_dword(pdev, vsec + 0x14, &val);
+ off = val;
+ pci_read_config_dword(pdev, vsec + 0x10, &val);
+ off <<= 32;
+ off |= val;
+ pdata->rg.off = off;
+}
+
static int dw_edma_pcie_probe(struct pci_dev *pdev,
const struct pci_device_id *pid)
{
- const struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
+ struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
+ struct dw_edma_pcie_data vsec_data;
struct device *dev = &pdev->dev;
struct dw_edma_chip *chip;
- int err, nr_irqs;
struct dw_edma *dw;
+ int err, nr_irqs;
+ int i, mask;
/* Enable PCI device */
err = pcim_enable_device(pdev);
@@ -79,11 +159,25 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return err;
}
+ memcpy(&vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
+
+ /*
+	 * Try to find a PCIe Vendor-Specific Extended Capability for the
+	 * DMA; if one exists, reconfigure the default parameters from it.
+ */
+ dw_edma_pcie_get_vsec_dma_data(pdev, &vsec_data);
+
/* Mapping PCI BAR regions */
- err = pcim_iomap_regions(pdev, BIT(pdata->rg_bar) |
- BIT(pdata->ll_bar) |
- BIT(pdata->dt_bar),
- pci_name(pdev));
+ mask = BIT(vsec_data.rg.bar);
+ for (i = 0; i < vsec_data.wr_ch_cnt; i++) {
+ mask |= BIT(vsec_data.ll_wr[i].bar);
+ mask |= BIT(vsec_data.dt_wr[i].bar);
+ }
+ for (i = 0; i < vsec_data.rd_ch_cnt; i++) {
+ mask |= BIT(vsec_data.ll_rd[i].bar);
+ mask |= BIT(vsec_data.dt_rd[i].bar);
+ }
+ err = pcim_iomap_regions(pdev, mask, pci_name(pdev));
if (err) {
pci_err(pdev, "eDMA BAR I/O remapping failed\n");
return err;
@@ -125,7 +219,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return -ENOMEM;
/* IRQs allocation */
- nr_irqs = pci_alloc_irq_vectors(pdev, 1, pdata->irqs,
+ nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (nr_irqs < 1) {
pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n",
@@ -139,46 +233,109 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
chip->id = pdev->devfn;
chip->irq = pdev->irq;
- dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar];
- dw->rg_region.vaddr += pdata->rg_off;
- dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
- dw->rg_region.paddr += pdata->rg_off;
- dw->rg_region.sz = pdata->rg_sz;
-
- dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar];
- dw->ll_region.vaddr += pdata->ll_off;
- dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
- dw->ll_region.paddr += pdata->ll_off;
- dw->ll_region.sz = pdata->ll_sz;
-
- dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar];
- dw->dt_region.vaddr += pdata->dt_off;
- dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
- dw->dt_region.paddr += pdata->dt_off;
- dw->dt_region.sz = pdata->dt_sz;
-
- dw->version = pdata->version;
- dw->mode = pdata->mode;
+ dw->mf = vsec_data.mf;
dw->nr_irqs = nr_irqs;
dw->ops = &dw_edma_pcie_core_ops;
+ dw->wr_ch_cnt = vsec_data.wr_ch_cnt;
+ dw->rd_ch_cnt = vsec_data.rd_ch_cnt;
- /* Debug info */
- pci_dbg(pdev, "Version:\t%u\n", dw->version);
+ dw->rg_region.vaddr = pcim_iomap_table(pdev)[vsec_data.rg.bar];
+ if (!dw->rg_region.vaddr)
+ return -ENOMEM;
+
+ dw->rg_region.vaddr += vsec_data.rg.off;
+ dw->rg_region.paddr = pdev->resource[vsec_data.rg.bar].start;
+ dw->rg_region.paddr += vsec_data.rg.off;
+ dw->rg_region.sz = vsec_data.rg.sz;
+
+ for (i = 0; i < dw->wr_ch_cnt; i++) {
+ struct dw_edma_region *ll_region = &dw->ll_region_wr[i];
+ struct dw_edma_region *dt_region = &dw->dt_region_wr[i];
+ struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
+ struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
+
+ ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
+ if (!ll_region->vaddr)
+ return -ENOMEM;
+
+ ll_region->vaddr += ll_block->off;
+ ll_region->paddr = pdev->resource[ll_block->bar].start;
+ ll_region->paddr += ll_block->off;
+ ll_region->sz = ll_block->sz;
+
+ dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
+ if (!dt_region->vaddr)
+ return -ENOMEM;
+
+ dt_region->vaddr += dt_block->off;
+ dt_region->paddr = pdev->resource[dt_block->bar].start;
+ dt_region->paddr += dt_block->off;
+ dt_region->sz = dt_block->sz;
+ }
- pci_dbg(pdev, "Mode:\t%s\n",
- dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");
+ for (i = 0; i < dw->rd_ch_cnt; i++) {
+ struct dw_edma_region *ll_region = &dw->ll_region_rd[i];
+ struct dw_edma_region *dt_region = &dw->dt_region_rd[i];
+ struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
+ struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
+
+ ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
+ if (!ll_region->vaddr)
+ return -ENOMEM;
+
+ ll_region->vaddr += ll_block->off;
+ ll_region->paddr = pdev->resource[ll_block->bar].start;
+ ll_region->paddr += ll_block->off;
+ ll_region->sz = ll_block->sz;
+
+ dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
+ if (!dt_region->vaddr)
+ return -ENOMEM;
+
+ dt_region->vaddr += dt_block->off;
+ dt_region->paddr = pdev->resource[dt_block->bar].start;
+ dt_region->paddr += dt_block->off;
+ dt_region->sz = dt_block->sz;
+ }
+
+ /* Debug info */
+ if (dw->mf == EDMA_MF_EDMA_LEGACY)
+ pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", dw->mf);
+ else if (dw->mf == EDMA_MF_EDMA_UNROLL)
+ pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", dw->mf);
+ else if (dw->mf == EDMA_MF_HDMA_COMPAT)
+ pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", dw->mf);
+ else
+ pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", dw->mf);
pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
+ vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz,
dw->rg_region.vaddr, &dw->rg_region.paddr);
- pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
- dw->ll_region.vaddr, &dw->ll_region.paddr);
- pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
- dw->dt_region.vaddr, &dw->dt_region.paddr);
+ for (i = 0; i < dw->wr_ch_cnt; i++) {
+ pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+ i, vsec_data.ll_wr[i].bar,
+ vsec_data.ll_wr[i].off, dw->ll_region_wr[i].sz,
+ dw->ll_region_wr[i].vaddr, &dw->ll_region_wr[i].paddr);
+
+ pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+ i, vsec_data.dt_wr[i].bar,
+ vsec_data.dt_wr[i].off, dw->dt_region_wr[i].sz,
+ dw->dt_region_wr[i].vaddr, &dw->dt_region_wr[i].paddr);
+ }
+
+ for (i = 0; i < dw->rd_ch_cnt; i++) {
+ pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+ i, vsec_data.ll_rd[i].bar,
+ vsec_data.ll_rd[i].off, dw->ll_region_rd[i].sz,
+ dw->ll_region_rd[i].vaddr, &dw->ll_region_rd[i].paddr);
+
+ pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+ i, vsec_data.dt_rd[i].bar,
+ vsec_data.dt_rd[i].off, dw->dt_region_rd[i].sz,
+ dw->dt_region_rd[i].vaddr, &dw->dt_region_rd[i].paddr);
+ }
pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
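
Note: the VSEC parsing above boils down to fixed-width field extraction from two config-space dwords. A standalone sketch of the decode, using the same DW_PCIE_VSEC_DMA_* field layout the patch defines (the function and parameter names are illustrative only):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/printk.h>

/* Illustrative decode of the two VSEC DMA dwords read at vsec + 0x8
 * and vsec + 0xc in dw_edma_pcie_get_vsec_dma_data(). */
static void example_decode_vsec_dma(u32 dw8, u32 dwc)
{
	u32 map = FIELD_GET(GENMASK(2, 0), dw8);	/* DW_PCIE_VSEC_DMA_MAP */
	u32 bar = FIELD_GET(GENMASK(10, 8), dw8);	/* DW_PCIE_VSEC_DMA_BAR */
	u32 wr_ch = FIELD_GET(GENMASK(9, 0), dwc);	/* DW_PCIE_VSEC_DMA_WR_CH */
	u32 rd_ch = FIELD_GET(GENMASK(25, 16), dwc);	/* DW_PCIE_VSEC_DMA_RD_CH */

	pr_info("map=%u bar=%u wr_ch=%u rd_ch=%u\n", map, bar, wr_ch, rd_ch);
}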
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 692de47b1670..329fc2e57b70 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -28,35 +28,75 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
return dw->rg_region.vaddr;
}
-#define SET(dw, name, value) \
+#define SET_32(dw, name, value) \
writel(value, &(__dw_regs(dw)->name))
-#define GET(dw, name) \
+#define GET_32(dw, name) \
readl(&(__dw_regs(dw)->name))
-#define SET_RW(dw, dir, name, value) \
+#define SET_RW_32(dw, dir, name, value) \
do { \
if ((dir) == EDMA_DIR_WRITE) \
- SET(dw, wr_##name, value); \
+ SET_32(dw, wr_##name, value); \
else \
- SET(dw, rd_##name, value); \
+ SET_32(dw, rd_##name, value); \
} while (0)
-#define GET_RW(dw, dir, name) \
+#define GET_RW_32(dw, dir, name) \
((dir) == EDMA_DIR_WRITE \
- ? GET(dw, wr_##name) \
- : GET(dw, rd_##name))
+ ? GET_32(dw, wr_##name) \
+ : GET_32(dw, rd_##name))
-#define SET_BOTH(dw, name, value) \
+#define SET_BOTH_32(dw, name, value) \
do { \
- SET(dw, wr_##name, value); \
- SET(dw, rd_##name, value); \
+ SET_32(dw, wr_##name, value); \
+ SET_32(dw, rd_##name, value); \
+ } while (0)
+
+#ifdef CONFIG_64BIT
+
+#define SET_64(dw, name, value) \
+ writeq(value, &(__dw_regs(dw)->name))
+
+#define GET_64(dw, name) \
+ readq(&(__dw_regs(dw)->name))
+
+#define SET_RW_64(dw, dir, name, value) \
+ do { \
+ if ((dir) == EDMA_DIR_WRITE) \
+ SET_64(dw, wr_##name, value); \
+ else \
+ SET_64(dw, rd_##name, value); \
+ } while (0)
+
+#define GET_RW_64(dw, dir, name) \
+ ((dir) == EDMA_DIR_WRITE \
+ ? GET_64(dw, wr_##name) \
+ : GET_64(dw, rd_##name))
+
+#define SET_BOTH_64(dw, name, value) \
+ do { \
+ SET_64(dw, wr_##name, value); \
+ SET_64(dw, rd_##name, value); \
+ } while (0)
+
+#endif /* CONFIG_64BIT */
+
+#define SET_COMPAT(dw, name, value) \
+ writel(value, &(__dw_regs(dw)->type.unroll.name))
+
+#define SET_RW_COMPAT(dw, dir, name, value) \
+ do { \
+ if ((dir) == EDMA_DIR_WRITE) \
+ SET_COMPAT(dw, wr_##name, value); \
+ else \
+ SET_COMPAT(dw, rd_##name, value); \
} while (0)
static inline struct dw_edma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
- if (dw->mode == EDMA_MODE_LEGACY)
+ if (dw->mf == EDMA_MF_EDMA_LEGACY)
return &(__dw_regs(dw)->type.legacy.ch);
if (dir == EDMA_DIR_WRITE)
@@ -68,7 +108,7 @@ __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
u32 value, void __iomem *addr)
{
- if (dw->mode == EDMA_MODE_LEGACY) {
+ if (dw->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -93,7 +133,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
{
u32 value;
- if (dw->mode == EDMA_MODE_LEGACY) {
+ if (dw->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
unsigned long flags;
@@ -115,21 +155,86 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
return value;
}
-#define SET_CH(dw, dir, ch, name, value) \
+#define SET_CH_32(dw, dir, ch, name, value) \
writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))
-#define GET_CH(dw, dir, ch, name) \
+#define GET_CH_32(dw, dir, ch, name) \
readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
-#define SET_LL(ll, value) \
+#define SET_LL_32(ll, value) \
writel(value, ll)
+#ifdef CONFIG_64BIT
+
+static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
+ u64 value, void __iomem *addr)
+{
+ if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ u32 viewport_sel;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&dw->lock, flags);
+
+ viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
+ if (dir == EDMA_DIR_READ)
+ viewport_sel |= BIT(31);
+
+ writel(viewport_sel,
+ &(__dw_regs(dw)->type.legacy.viewport_sel));
+ writeq(value, addr);
+
+ raw_spin_unlock_irqrestore(&dw->lock, flags);
+ } else {
+ writeq(value, addr);
+ }
+}
+
+static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
+ const void __iomem *addr)
+{
+	u64 value;
+
+ if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+ u32 viewport_sel;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&dw->lock, flags);
+
+ viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
+ if (dir == EDMA_DIR_READ)
+ viewport_sel |= BIT(31);
+
+ writel(viewport_sel,
+ &(__dw_regs(dw)->type.legacy.viewport_sel));
+ value = readq(addr);
+
+ raw_spin_unlock_irqrestore(&dw->lock, flags);
+ } else {
+ value = readq(addr);
+ }
+
+ return value;
+}
+
+#define SET_CH_64(dw, dir, ch, name, value) \
+ writeq_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))
+
+#define GET_CH_64(dw, dir, ch, name) \
+ readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
+
+#define SET_LL_64(ll, value) \
+ writeq(value, ll)
+
+#endif /* CONFIG_64BIT */
+
/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
- SET_BOTH(dw, int_mask, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
- SET_BOTH(dw, int_clear, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
- SET_BOTH(dw, engine_en, 0);
+ SET_BOTH_32(dw, int_mask,
+ EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
+ SET_BOTH_32(dw, int_clear,
+ EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
+ SET_BOTH_32(dw, engine_en, 0);
}
u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
@@ -137,9 +242,11 @@ u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
u32 num_ch;
if (dir == EDMA_DIR_WRITE)
- num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, GET(dw, ctrl));
+ num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK,
+ GET_32(dw, ctrl));
else
- num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, GET(dw, ctrl));
+ num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK,
+ GET_32(dw, ctrl));
if (num_ch > EDMA_V0_MAX_NR_CH)
num_ch = EDMA_V0_MAX_NR_CH;
@@ -153,7 +260,7 @@ enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
u32 tmp;
tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
- GET_CH(dw, chan->dir, chan->id, ch_control1));
+ GET_CH_32(dw, chan->dir, chan->id, ch_control1));
if (tmp == 1)
return DMA_IN_PROGRESS;
@@ -167,26 +274,28 @@ void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->chip->dw;
- SET_RW(dw, chan->dir, int_clear,
- FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
+ SET_RW_32(dw, chan->dir, int_clear,
+ FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
}
void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->chip->dw;
- SET_RW(dw, chan->dir, int_clear,
- FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
+ SET_RW_32(dw, chan->dir, int_clear,
+ FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
}
u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
- return FIELD_GET(EDMA_V0_DONE_INT_MASK, GET_RW(dw, dir, int_status));
+ return FIELD_GET(EDMA_V0_DONE_INT_MASK,
+ GET_RW_32(dw, dir, int_status));
}
u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
- return FIELD_GET(EDMA_V0_ABORT_INT_MASK, GET_RW(dw, dir, int_status));
+ return FIELD_GET(EDMA_V0_ABORT_INT_MASK,
+ GET_RW_32(dw, dir, int_status));
}
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
@@ -209,15 +318,23 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE);
/* Channel control */
- SET_LL(&lli[i].control, control);
+ SET_LL_32(&lli[i].control, control);
/* Transfer size */
- SET_LL(&lli[i].transfer_size, child->sz);
- /* SAR - low, high */
- SET_LL(&lli[i].sar_low, lower_32_bits(child->sar));
- SET_LL(&lli[i].sar_high, upper_32_bits(child->sar));
- /* DAR - low, high */
- SET_LL(&lli[i].dar_low, lower_32_bits(child->dar));
- SET_LL(&lli[i].dar_high, upper_32_bits(child->dar));
+ SET_LL_32(&lli[i].transfer_size, child->sz);
+ /* SAR */
+ #ifdef CONFIG_64BIT
+ SET_LL_64(&lli[i].sar.reg, child->sar);
+ #else /* CONFIG_64BIT */
+ SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar));
+ SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar));
+ #endif /* CONFIG_64BIT */
+ /* DAR */
+ #ifdef CONFIG_64BIT
+ SET_LL_64(&lli[i].dar.reg, child->dar);
+ #else /* CONFIG_64BIT */
+ SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar));
+ SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar));
+ #endif /* CONFIG_64BIT */
i++;
}
@@ -227,10 +344,14 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
control |= DW_EDMA_V0_CB;
/* Channel control */
- SET_LL(&llp->control, control);
- /* Linked list - low, high */
- SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr));
- SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr));
+ SET_LL_32(&llp->control, control);
+ /* Linked list */
+ #ifdef CONFIG_64BIT
+ SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr);
+ #else /* CONFIG_64BIT */
+ SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr));
+ SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr));
+ #endif /* CONFIG_64BIT */
}
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -243,28 +364,69 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
if (first) {
/* Enable engine */
- SET_RW(dw, chan->dir, engine_en, BIT(0));
+ SET_RW_32(dw, chan->dir, engine_en, BIT(0));
+ if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+ switch (chan->id) {
+ case 0:
+ SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en,
+ BIT(0));
+ break;
+ case 1:
+ SET_RW_COMPAT(dw, chan->dir, ch1_pwr_en,
+ BIT(0));
+ break;
+ case 2:
+ SET_RW_COMPAT(dw, chan->dir, ch2_pwr_en,
+ BIT(0));
+ break;
+ case 3:
+ SET_RW_COMPAT(dw, chan->dir, ch3_pwr_en,
+ BIT(0));
+ break;
+ case 4:
+ SET_RW_COMPAT(dw, chan->dir, ch4_pwr_en,
+ BIT(0));
+ break;
+ case 5:
+ SET_RW_COMPAT(dw, chan->dir, ch5_pwr_en,
+ BIT(0));
+ break;
+ case 6:
+ SET_RW_COMPAT(dw, chan->dir, ch6_pwr_en,
+ BIT(0));
+ break;
+ case 7:
+ SET_RW_COMPAT(dw, chan->dir, ch7_pwr_en,
+ BIT(0));
+ break;
+ }
+ }
/* Interrupt unmask - done, abort */
- tmp = GET_RW(dw, chan->dir, int_mask);
+ tmp = GET_RW_32(dw, chan->dir, int_mask);
tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
- SET_RW(dw, chan->dir, int_mask, tmp);
+ SET_RW_32(dw, chan->dir, int_mask, tmp);
/* Linked list error */
- tmp = GET_RW(dw, chan->dir, linked_list_err_en);
+ tmp = GET_RW_32(dw, chan->dir, linked_list_err_en);
tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
- SET_RW(dw, chan->dir, linked_list_err_en, tmp);
+ SET_RW_32(dw, chan->dir, linked_list_err_en, tmp);
/* Channel control */
- SET_CH(dw, chan->dir, chan->id, ch_control1,
- (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
- /* Linked list - low, high */
- SET_CH(dw, chan->dir, chan->id, llp_low,
- lower_32_bits(chunk->ll_region.paddr));
- SET_CH(dw, chan->dir, chan->id, llp_high,
- upper_32_bits(chunk->ll_region.paddr));
+ SET_CH_32(dw, chan->dir, chan->id, ch_control1,
+ (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
+ /* Linked list */
+ #ifdef CONFIG_64BIT
+ SET_CH_64(dw, chan->dir, chan->id, llp.reg,
+ chunk->ll_region.paddr);
+ #else /* CONFIG_64BIT */
+ SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+ lower_32_bits(chunk->ll_region.paddr));
+ SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+ upper_32_bits(chunk->ll_region.paddr));
+ #endif /* CONFIG_64BIT */
}
/* Doorbell */
- SET_RW(dw, chan->dir, doorbell,
- FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
+ SET_RW_32(dw, chan->dir, doorbell,
+ FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
}
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
@@ -273,31 +435,31 @@ int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
u32 tmp = 0;
/* MSI done addr - low, high */
- SET_RW(dw, chan->dir, done_imwr_low, chan->msi.address_lo);
- SET_RW(dw, chan->dir, done_imwr_high, chan->msi.address_hi);
+ SET_RW_32(dw, chan->dir, done_imwr.lsb, chan->msi.address_lo);
+ SET_RW_32(dw, chan->dir, done_imwr.msb, chan->msi.address_hi);
/* MSI abort addr - low, high */
- SET_RW(dw, chan->dir, abort_imwr_low, chan->msi.address_lo);
- SET_RW(dw, chan->dir, abort_imwr_high, chan->msi.address_hi);
+ SET_RW_32(dw, chan->dir, abort_imwr.lsb, chan->msi.address_lo);
+ SET_RW_32(dw, chan->dir, abort_imwr.msb, chan->msi.address_hi);
/* MSI data - low, high */
switch (chan->id) {
case 0:
case 1:
- tmp = GET_RW(dw, chan->dir, ch01_imwr_data);
+ tmp = GET_RW_32(dw, chan->dir, ch01_imwr_data);
break;
case 2:
case 3:
- tmp = GET_RW(dw, chan->dir, ch23_imwr_data);
+ tmp = GET_RW_32(dw, chan->dir, ch23_imwr_data);
break;
case 4:
case 5:
- tmp = GET_RW(dw, chan->dir, ch45_imwr_data);
+ tmp = GET_RW_32(dw, chan->dir, ch45_imwr_data);
break;
case 6:
case 7:
- tmp = GET_RW(dw, chan->dir, ch67_imwr_data);
+ tmp = GET_RW_32(dw, chan->dir, ch67_imwr_data);
break;
}
@@ -316,22 +478,22 @@ int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
switch (chan->id) {
case 0:
case 1:
- SET_RW(dw, chan->dir, ch01_imwr_data, tmp);
+ SET_RW_32(dw, chan->dir, ch01_imwr_data, tmp);
break;
case 2:
case 3:
- SET_RW(dw, chan->dir, ch23_imwr_data, tmp);
+ SET_RW_32(dw, chan->dir, ch23_imwr_data, tmp);
break;
case 4:
case 5:
- SET_RW(dw, chan->dir, ch45_imwr_data, tmp);
+ SET_RW_32(dw, chan->dir, ch45_imwr_data, tmp);
break;
case 6:
case 7:
- SET_RW(dw, chan->dir, ch67_imwr_data, tmp);
+ SET_RW_32(dw, chan->dir, ch67_imwr_data, tmp);
break;
}
@@ -344,7 +506,7 @@ void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip)
dw_edma_v0_debugfs_on(chip);
}
-void dw_edma_v0_core_debugfs_off(void)
+void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip)
{
- dw_edma_v0_debugfs_off();
+ dw_edma_v0_debugfs_off(chip);
}
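
Note: the new SET_LL_64/SET_LL_32 pairs encode one policy: a native 64-bit store where CONFIG_64BIT provides writeq(), otherwise two 32-bit stores in lsb-then-msb order. A hedged, self-contained sketch of that pattern (the helper name is hypothetical, not part of the patch):

#include <linux/io.h>
#include <linux/kernel.h>

/* Hypothetical helper mirroring the SET_LL_64/SET_LL_32 policy: one
 * 64-bit MMIO store on 64-bit builds, otherwise an lsb-then-msb pair
 * of 32-bit stores to the two consecutive register halves. */
static inline void example_set_ll(u64 value, void __iomem *addr)
{
#ifdef CONFIG_64BIT
	writeq(value, addr);
#else
	writel(lower_32_bits(value), addr);
	writel(upper_32_bits(value), addr + sizeof(u32));
#endif
}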
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h
index abae1527f1f9..2afa626b8300 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.h
@@ -23,6 +23,6 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
/* eDMA debug fs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip);
-void dw_edma_v0_core_debugfs_off(void);
+void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip);
#endif /* _DW_EDMA_V0_CORE_H */
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 6f62711a4c94..4b3bcffd15ef 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -38,7 +38,6 @@
#define CHANNEL_STR "channel"
#define REGISTERS_STR "registers"
-static struct dentry *base_dir;
static struct dw_edma *dw;
static struct dw_edma_v0_regs __iomem *regs;
@@ -55,7 +54,7 @@ struct debugfs_entries {
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
void __iomem *reg = (void __force __iomem *)data;
- if (dw->mode == EDMA_MODE_LEGACY &&
+ if (dw->mf == EDMA_MF_EDMA_LEGACY &&
reg >= (void __iomem *)&regs->type.legacy.ch) {
void __iomem *ptr = &regs->type.legacy.ch;
u32 viewport_sel = 0;
@@ -114,12 +113,12 @@ static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
REGISTER(ch_control1),
REGISTER(ch_control2),
REGISTER(transfer_size),
- REGISTER(sar_low),
- REGISTER(sar_high),
- REGISTER(dar_low),
- REGISTER(dar_high),
- REGISTER(llp_low),
- REGISTER(llp_high),
+ REGISTER(sar.lsb),
+ REGISTER(sar.msb),
+ REGISTER(dar.lsb),
+ REGISTER(dar.msb),
+ REGISTER(llp.lsb),
+ REGISTER(llp.msb),
};
nr_entries = ARRAY_SIZE(debugfs_regs);
@@ -132,17 +131,17 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir)
/* eDMA global registers */
WR_REGISTER(engine_en),
WR_REGISTER(doorbell),
- WR_REGISTER(ch_arb_weight_low),
- WR_REGISTER(ch_arb_weight_high),
+ WR_REGISTER(ch_arb_weight.lsb),
+ WR_REGISTER(ch_arb_weight.msb),
/* eDMA interrupts registers */
WR_REGISTER(int_status),
WR_REGISTER(int_mask),
WR_REGISTER(int_clear),
WR_REGISTER(err_status),
- WR_REGISTER(done_imwr_low),
- WR_REGISTER(done_imwr_high),
- WR_REGISTER(abort_imwr_low),
- WR_REGISTER(abort_imwr_high),
+ WR_REGISTER(done_imwr.lsb),
+ WR_REGISTER(done_imwr.msb),
+ WR_REGISTER(abort_imwr.lsb),
+ WR_REGISTER(abort_imwr.msb),
WR_REGISTER(ch01_imwr_data),
WR_REGISTER(ch23_imwr_data),
WR_REGISTER(ch45_imwr_data),
@@ -152,8 +151,8 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir)
const struct debugfs_entries debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
WR_REGISTER_UNROLL(engine_chgroup),
- WR_REGISTER_UNROLL(engine_hshake_cnt_low),
- WR_REGISTER_UNROLL(engine_hshake_cnt_high),
+ WR_REGISTER_UNROLL(engine_hshake_cnt.lsb),
+ WR_REGISTER_UNROLL(engine_hshake_cnt.msb),
WR_REGISTER_UNROLL(ch0_pwr_en),
WR_REGISTER_UNROLL(ch1_pwr_en),
WR_REGISTER_UNROLL(ch2_pwr_en),
@@ -174,7 +173,7 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir)
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
- if (dw->mode == EDMA_MODE_UNROLL) {
+ if (dw->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
regs_dir);
@@ -200,19 +199,19 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir)
/* eDMA global registers */
RD_REGISTER(engine_en),
RD_REGISTER(doorbell),
- RD_REGISTER(ch_arb_weight_low),
- RD_REGISTER(ch_arb_weight_high),
+ RD_REGISTER(ch_arb_weight.lsb),
+ RD_REGISTER(ch_arb_weight.msb),
/* eDMA interrupts registers */
RD_REGISTER(int_status),
RD_REGISTER(int_mask),
RD_REGISTER(int_clear),
- RD_REGISTER(err_status_low),
- RD_REGISTER(err_status_high),
+ RD_REGISTER(err_status.lsb),
+ RD_REGISTER(err_status.msb),
RD_REGISTER(linked_list_err_en),
- RD_REGISTER(done_imwr_low),
- RD_REGISTER(done_imwr_high),
- RD_REGISTER(abort_imwr_low),
- RD_REGISTER(abort_imwr_high),
+ RD_REGISTER(done_imwr.lsb),
+ RD_REGISTER(done_imwr.msb),
+ RD_REGISTER(abort_imwr.lsb),
+ RD_REGISTER(abort_imwr.msb),
RD_REGISTER(ch01_imwr_data),
RD_REGISTER(ch23_imwr_data),
RD_REGISTER(ch45_imwr_data),
@@ -221,8 +220,8 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir)
const struct debugfs_entries debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
RD_REGISTER_UNROLL(engine_chgroup),
- RD_REGISTER_UNROLL(engine_hshake_cnt_low),
- RD_REGISTER_UNROLL(engine_hshake_cnt_high),
+ RD_REGISTER_UNROLL(engine_hshake_cnt.lsb),
+ RD_REGISTER_UNROLL(engine_hshake_cnt.msb),
RD_REGISTER_UNROLL(ch0_pwr_en),
RD_REGISTER_UNROLL(ch1_pwr_en),
RD_REGISTER_UNROLL(ch2_pwr_en),
@@ -243,7 +242,7 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir)
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
- if (dw->mode == EDMA_MODE_UNROLL) {
+ if (dw->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
regs_dir);
@@ -272,7 +271,7 @@ static void dw_edma_debugfs_regs(void)
struct dentry *regs_dir;
int nr_entries;
- regs_dir = debugfs_create_dir(REGISTERS_STR, base_dir);
+ regs_dir = debugfs_create_dir(REGISTERS_STR, dw->debugfs);
if (!regs_dir)
return;
@@ -293,19 +292,23 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
if (!regs)
return;
- base_dir = debugfs_create_dir(dw->name, NULL);
- if (!base_dir)
+ dw->debugfs = debugfs_create_dir(dw->name, NULL);
+ if (!dw->debugfs)
return;
- debugfs_create_u32("version", 0444, base_dir, &dw->version);
- debugfs_create_u32("mode", 0444, base_dir, &dw->mode);
- debugfs_create_u16("wr_ch_cnt", 0444, base_dir, &dw->wr_ch_cnt);
- debugfs_create_u16("rd_ch_cnt", 0444, base_dir, &dw->rd_ch_cnt);
+ debugfs_create_u32("mf", 0444, dw->debugfs, &dw->mf);
+ debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt);
+ debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt);
dw_edma_debugfs_regs();
}
-void dw_edma_v0_debugfs_off(void)
+void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip)
{
- debugfs_remove_recursive(base_dir);
+ dw = chip->dw;
+ if (!dw)
+ return;
+
+ debugfs_remove_recursive(dw->debugfs);
+ dw->debugfs = NULL;
}
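
Note: moving the dentry from a file-static base_dir into struct dw_edma is the standard per-device debugfs lifetime pattern; two chips no longer fight over one directory and teardown becomes per instance. A minimal sketch under that assumption (the struct and names are illustrative):

#include <linux/debugfs.h>

/* Illustrative per-device debugfs lifetime: the dentry lives in the
 * device struct, so create and remove are symmetric per instance. */
struct example_dev {
	const char *name;
	u32 mf;
	struct dentry *debugfs;
};

static void example_debugfs_on(struct example_dev *ed)
{
	ed->debugfs = debugfs_create_dir(ed->name, NULL);
	debugfs_create_u32("mf", 0444, ed->debugfs, &ed->mf);
}

static void example_debugfs_off(struct example_dev *ed)
{
	debugfs_remove_recursive(ed->debugfs);
	ed->debugfs = NULL;
}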
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
index 5450a0a94193..d0ff25a9ea5c 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
@@ -13,13 +13,13 @@
#ifdef CONFIG_DEBUG_FS
void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip);
-void dw_edma_v0_debugfs_off(void);
+void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip);
#else
static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
{
}
-static inline void dw_edma_v0_debugfs_off(void)
+static inline void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip)
{
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/dma/dw-edma/dw-edma-v0-regs.h b/drivers/dma/dw-edma/dw-edma-v0-regs.h
index dfd70e223c2f..e175f7b20480 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-regs.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-regs.h
@@ -25,134 +25,209 @@
#define EDMA_V0_CH_EVEN_MSI_DATA_MASK GENMASK(15, 0)
struct dw_edma_v0_ch_regs {
- u32 ch_control1; /* 0x000 */
- u32 ch_control2; /* 0x004 */
- u32 transfer_size; /* 0x008 */
- u32 sar_low; /* 0x00c */
- u32 sar_high; /* 0x010 */
- u32 dar_low; /* 0x014 */
- u32 dar_high; /* 0x018 */
- u32 llp_low; /* 0x01c */
- u32 llp_high; /* 0x020 */
-};
+ u32 ch_control1; /* 0x0000 */
+ u32 ch_control2; /* 0x0004 */
+ u32 transfer_size; /* 0x0008 */
+ union {
+ u64 reg; /* 0x000c..0x0010 */
+ struct {
+ u32 lsb; /* 0x000c */
+ u32 msb; /* 0x0010 */
+ };
+ } sar;
+ union {
+ u64 reg; /* 0x0014..0x0018 */
+ struct {
+ u32 lsb; /* 0x0014 */
+ u32 msb; /* 0x0018 */
+ };
+ } dar;
+ union {
+ u64 reg; /* 0x001c..0x0020 */
+ struct {
+ u32 lsb; /* 0x001c */
+ u32 msb; /* 0x0020 */
+ };
+ } llp;
+} __packed;
struct dw_edma_v0_ch {
- struct dw_edma_v0_ch_regs wr; /* 0x200 */
- u32 padding_1[55]; /* [0x224..0x2fc] */
- struct dw_edma_v0_ch_regs rd; /* 0x300 */
- u32 padding_2[55]; /* [0x324..0x3fc] */
-};
+ struct dw_edma_v0_ch_regs wr; /* 0x0200 */
+ u32 padding_1[55]; /* 0x0224..0x02fc */
+ struct dw_edma_v0_ch_regs rd; /* 0x0300 */
+ u32 padding_2[55]; /* 0x0324..0x03fc */
+} __packed;
struct dw_edma_v0_unroll {
- u32 padding_1; /* 0x0f8 */
- u32 wr_engine_chgroup; /* 0x100 */
- u32 rd_engine_chgroup; /* 0x104 */
- u32 wr_engine_hshake_cnt_low; /* 0x108 */
- u32 wr_engine_hshake_cnt_high; /* 0x10c */
- u32 padding_2[2]; /* [0x110..0x114] */
- u32 rd_engine_hshake_cnt_low; /* 0x118 */
- u32 rd_engine_hshake_cnt_high; /* 0x11c */
- u32 padding_3[2]; /* [0x120..0x124] */
- u32 wr_ch0_pwr_en; /* 0x128 */
- u32 wr_ch1_pwr_en; /* 0x12c */
- u32 wr_ch2_pwr_en; /* 0x130 */
- u32 wr_ch3_pwr_en; /* 0x134 */
- u32 wr_ch4_pwr_en; /* 0x138 */
- u32 wr_ch5_pwr_en; /* 0x13c */
- u32 wr_ch6_pwr_en; /* 0x140 */
- u32 wr_ch7_pwr_en; /* 0x144 */
- u32 padding_4[8]; /* [0x148..0x164] */
- u32 rd_ch0_pwr_en; /* 0x168 */
- u32 rd_ch1_pwr_en; /* 0x16c */
- u32 rd_ch2_pwr_en; /* 0x170 */
- u32 rd_ch3_pwr_en; /* 0x174 */
- u32 rd_ch4_pwr_en; /* 0x178 */
- u32 rd_ch5_pwr_en; /* 0x18c */
- u32 rd_ch6_pwr_en; /* 0x180 */
- u32 rd_ch7_pwr_en; /* 0x184 */
- u32 padding_5[30]; /* [0x188..0x1fc] */
- struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH]; /* [0x200..0x1120] */
-};
+ u32 padding_1; /* 0x00f8 */
+ u32 wr_engine_chgroup; /* 0x0100 */
+ u32 rd_engine_chgroup; /* 0x0104 */
+ union {
+ u64 reg; /* 0x0108..0x010c */
+ struct {
+ u32 lsb; /* 0x0108 */
+ u32 msb; /* 0x010c */
+ };
+ } wr_engine_hshake_cnt;
+ u32 padding_2[2]; /* 0x0110..0x0114 */
+ union {
+	u64 reg; /* 0x0118..0x011c */
+	struct {
+	u32 lsb; /* 0x0118 */
+	u32 msb; /* 0x011c */
+ };
+ } rd_engine_hshake_cnt;
+ u32 padding_3[2]; /* 0x0120..0x0124 */
+ u32 wr_ch0_pwr_en; /* 0x0128 */
+ u32 wr_ch1_pwr_en; /* 0x012c */
+ u32 wr_ch2_pwr_en; /* 0x0130 */
+ u32 wr_ch3_pwr_en; /* 0x0134 */
+ u32 wr_ch4_pwr_en; /* 0x0138 */
+ u32 wr_ch5_pwr_en; /* 0x013c */
+ u32 wr_ch6_pwr_en; /* 0x0140 */
+ u32 wr_ch7_pwr_en; /* 0x0144 */
+ u32 padding_4[8]; /* 0x0148..0x0164 */
+ u32 rd_ch0_pwr_en; /* 0x0168 */
+ u32 rd_ch1_pwr_en; /* 0x016c */
+ u32 rd_ch2_pwr_en; /* 0x0170 */
+ u32 rd_ch3_pwr_en; /* 0x0174 */
+ u32 rd_ch4_pwr_en; /* 0x0178 */
+	u32 rd_ch5_pwr_en; /* 0x017c */
+ u32 rd_ch6_pwr_en; /* 0x0180 */
+ u32 rd_ch7_pwr_en; /* 0x0184 */
+ u32 padding_5[30]; /* 0x0188..0x01fc */
+ struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH]; /* 0x0200..0x1120 */
+} __packed;
struct dw_edma_v0_legacy {
- u32 viewport_sel; /* 0x0f8 */
- struct dw_edma_v0_ch_regs ch; /* [0x100..0x120] */
-};
+ u32 viewport_sel; /* 0x00f8 */
+ struct dw_edma_v0_ch_regs ch; /* 0x0100..0x0120 */
+} __packed;
struct dw_edma_v0_regs {
/* eDMA global registers */
- u32 ctrl_data_arb_prior; /* 0x000 */
- u32 padding_1; /* 0x004 */
- u32 ctrl; /* 0x008 */
- u32 wr_engine_en; /* 0x00c */
- u32 wr_doorbell; /* 0x010 */
- u32 padding_2; /* 0x014 */
- u32 wr_ch_arb_weight_low; /* 0x018 */
- u32 wr_ch_arb_weight_high; /* 0x01c */
- u32 padding_3[3]; /* [0x020..0x028] */
- u32 rd_engine_en; /* 0x02c */
- u32 rd_doorbell; /* 0x030 */
- u32 padding_4; /* 0x034 */
- u32 rd_ch_arb_weight_low; /* 0x038 */
- u32 rd_ch_arb_weight_high; /* 0x03c */
- u32 padding_5[3]; /* [0x040..0x048] */
+ u32 ctrl_data_arb_prior; /* 0x0000 */
+ u32 padding_1; /* 0x0004 */
+ u32 ctrl; /* 0x0008 */
+ u32 wr_engine_en; /* 0x000c */
+ u32 wr_doorbell; /* 0x0010 */
+ u32 padding_2; /* 0x0014 */
+ union {
+ u64 reg; /* 0x0018..0x001c */
+ struct {
+ u32 lsb; /* 0x0018 */
+ u32 msb; /* 0x001c */
+ };
+ } wr_ch_arb_weight;
+ u32 padding_3[3]; /* 0x0020..0x0028 */
+ u32 rd_engine_en; /* 0x002c */
+ u32 rd_doorbell; /* 0x0030 */
+ u32 padding_4; /* 0x0034 */
+ union {
+ u64 reg; /* 0x0038..0x003c */
+ struct {
+ u32 lsb; /* 0x0038 */
+ u32 msb; /* 0x003c */
+ };
+ } rd_ch_arb_weight;
+ u32 padding_5[3]; /* 0x0040..0x0048 */
/* eDMA interrupts registers */
- u32 wr_int_status; /* 0x04c */
- u32 padding_6; /* 0x050 */
- u32 wr_int_mask; /* 0x054 */
- u32 wr_int_clear; /* 0x058 */
- u32 wr_err_status; /* 0x05c */
- u32 wr_done_imwr_low; /* 0x060 */
- u32 wr_done_imwr_high; /* 0x064 */
- u32 wr_abort_imwr_low; /* 0x068 */
- u32 wr_abort_imwr_high; /* 0x06c */
- u32 wr_ch01_imwr_data; /* 0x070 */
- u32 wr_ch23_imwr_data; /* 0x074 */
- u32 wr_ch45_imwr_data; /* 0x078 */
- u32 wr_ch67_imwr_data; /* 0x07c */
- u32 padding_7[4]; /* [0x080..0x08c] */
- u32 wr_linked_list_err_en; /* 0x090 */
- u32 padding_8[3]; /* [0x094..0x09c] */
- u32 rd_int_status; /* 0x0a0 */
- u32 padding_9; /* 0x0a4 */
- u32 rd_int_mask; /* 0x0a8 */
- u32 rd_int_clear; /* 0x0ac */
- u32 padding_10; /* 0x0b0 */
- u32 rd_err_status_low; /* 0x0b4 */
- u32 rd_err_status_high; /* 0x0b8 */
- u32 padding_11[2]; /* [0x0bc..0x0c0] */
- u32 rd_linked_list_err_en; /* 0x0c4 */
- u32 padding_12; /* 0x0c8 */
- u32 rd_done_imwr_low; /* 0x0cc */
- u32 rd_done_imwr_high; /* 0x0d0 */
- u32 rd_abort_imwr_low; /* 0x0d4 */
- u32 rd_abort_imwr_high; /* 0x0d8 */
- u32 rd_ch01_imwr_data; /* 0x0dc */
- u32 rd_ch23_imwr_data; /* 0x0e0 */
- u32 rd_ch45_imwr_data; /* 0x0e4 */
- u32 rd_ch67_imwr_data; /* 0x0e8 */
- u32 padding_13[4]; /* [0x0ec..0x0f8] */
+ u32 wr_int_status; /* 0x004c */
+ u32 padding_6; /* 0x0050 */
+ u32 wr_int_mask; /* 0x0054 */
+ u32 wr_int_clear; /* 0x0058 */
+ u32 wr_err_status; /* 0x005c */
+ union {
+ u64 reg; /* 0x0060..0x0064 */
+ struct {
+ u32 lsb; /* 0x0060 */
+ u32 msb; /* 0x0064 */
+ };
+ } wr_done_imwr;
+ union {
+ u64 reg; /* 0x0068..0x006c */
+ struct {
+ u32 lsb; /* 0x0068 */
+ u32 msb; /* 0x006c */
+ };
+ } wr_abort_imwr;
+ u32 wr_ch01_imwr_data; /* 0x0070 */
+ u32 wr_ch23_imwr_data; /* 0x0074 */
+ u32 wr_ch45_imwr_data; /* 0x0078 */
+ u32 wr_ch67_imwr_data; /* 0x007c */
+ u32 padding_7[4]; /* 0x0080..0x008c */
+ u32 wr_linked_list_err_en; /* 0x0090 */
+ u32 padding_8[3]; /* 0x0094..0x009c */
+ u32 rd_int_status; /* 0x00a0 */
+ u32 padding_9; /* 0x00a4 */
+ u32 rd_int_mask; /* 0x00a8 */
+ u32 rd_int_clear; /* 0x00ac */
+ u32 padding_10; /* 0x00b0 */
+ union {
+ u64 reg; /* 0x00b4..0x00b8 */
+ struct {
+ u32 lsb; /* 0x00b4 */
+ u32 msb; /* 0x00b8 */
+ };
+ } rd_err_status;
+ u32 padding_11[2]; /* 0x00bc..0x00c0 */
+ u32 rd_linked_list_err_en; /* 0x00c4 */
+ u32 padding_12; /* 0x00c8 */
+ union {
+ u64 reg; /* 0x00cc..0x00d0 */
+ struct {
+ u32 lsb; /* 0x00cc */
+ u32 msb; /* 0x00d0 */
+ };
+ } rd_done_imwr;
+ union {
+ u64 reg; /* 0x00d4..0x00d8 */
+ struct {
+ u32 lsb; /* 0x00d4 */
+ u32 msb; /* 0x00d8 */
+ };
+ } rd_abort_imwr;
+ u32 rd_ch01_imwr_data; /* 0x00dc */
+ u32 rd_ch23_imwr_data; /* 0x00e0 */
+ u32 rd_ch45_imwr_data; /* 0x00e4 */
+ u32 rd_ch67_imwr_data; /* 0x00e8 */
+ u32 padding_13[4]; /* 0x00ec..0x00f8 */
/* eDMA channel context grouping */
union dw_edma_v0_type {
- struct dw_edma_v0_legacy legacy; /* [0x0f8..0x120] */
- struct dw_edma_v0_unroll unroll; /* [0x0f8..0x1120] */
+ struct dw_edma_v0_legacy legacy; /* 0x00f8..0x0120 */
+ struct dw_edma_v0_unroll unroll; /* 0x00f8..0x1120 */
} type;
-};
+} __packed;
struct dw_edma_v0_lli {
u32 control;
u32 transfer_size;
- u32 sar_low;
- u32 sar_high;
- u32 dar_low;
- u32 dar_high;
-};
+ union {
+ u64 reg;
+ struct {
+ u32 lsb;
+ u32 msb;
+ };
+ } sar;
+ union {
+ u64 reg;
+ struct {
+ u32 lsb;
+ u32 msb;
+ };
+ } dar;
+} __packed;
struct dw_edma_v0_llp {
u32 control;
u32 reserved;
- u32 llp_low;
- u32 llp_high;
-};
+ union {
+ u64 reg;
+ struct {
+ u32 lsb;
+ u32 msb;
+ };
+ } llp;
+} __packed;
#endif /* _DW_EDMA_V0_REGS_H */
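
Note: the union-with-anonymous-struct overlay relies on reg and the lsb/msb pair occupying the same eight __packed bytes, with lsb aliasing the low half (little-endian assumed, as on the hosts this driver targets). Hypothetical compile-time checks along these lines would catch layout drift; they are not part of the patch:

#include <linux/build_bug.h>
#include <linux/stddef.h>

/* Hypothetical layout assertions for the llp descriptor: reg must
 * alias the lsb/msb pair exactly, and __packed must keep the struct
 * at its 16-byte hardware size. */
static inline void example_check_llp_layout(void)
{
	BUILD_BUG_ON(sizeof(struct dw_edma_v0_llp) != 16);
	BUILD_BUG_ON(offsetof(struct dw_edma_v0_llp, llp.reg) !=
		     offsetof(struct dw_edma_v0_llp, llp.lsb));
	BUILD_BUG_ON(offsetof(struct dw_edma_v0_llp, llp.msb) !=
		     offsetof(struct dw_edma_v0_llp, llp.lsb) + sizeof(u32));
}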