author    | Olof Johansson <olof@lixom.net> | 2014-05-26 23:59:05 +0200
committer | Olof Johansson <olof@lixom.net> | 2014-05-26 23:59:05 +0200
commit    | 3a5e23cf9e553cd32e68bf29fb5429e3e9a95467 (patch)
tree      | d6b028bb8ac6c9866be6d0ae14f7bed5ab6edad1
parent    | Merge tag 'davinci-fixes-for-v3.15-rc4' of git://git.kernel.org/pub/scm/linux... (diff)
parent    | ARM: edma: Remove redundant/unused parameters from edma_soc_info (diff)
download  | linux-3a5e23cf9e553cd32e68bf29fb5429e3e9a95467.tar.xz
          | linux-3a5e23cf9e553cd32e68bf29fb5429e3e9a95467.zip
Merge tag 'davinci-for-v3.16/edma' of git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci into next/drivers
Merge "DaVinci EDMA clean-up for v3.16" from Sekhar Nori:
This series makes edma use configuration information available within
the IP instead of reading it from platform data or DT. Some other useful
clean-ups are included too.
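For reference, the "configuration information available within the IP" is the CCCFG register of the eDMA3 channel controller. Below is a minimal user-space model of the bit math performed by the new edma_setup_from_hw() in the diff; the field positions come from the GET_NUM_* macros added to arch/arm/common/edma.c, while the register value used here is hypothetical:

```c
/*
 * Standalone sketch of the CCCFG decoding done by edma_setup_from_hw()
 * (see arch/arm/common/edma.c below). Each field encodes a power of two
 * (sometimes with an offset), not a raw count. The cccfg value is a
 * made-up example, not read from real hardware.
 */
#include <stdio.h>
#include <stdint.h>

#define BIT(n)			(1u << (n))
#define GET_NUM_DMACH(x)	((x) & 0x7)		/* bits 0-2 */
#define GET_NUM_PAENTRY(x)	(((x) & 0x7000) >> 12)	/* bits 12-14 */
#define GET_NUM_EVQUE(x)	(((x) & 0x70000) >> 16)	/* bits 16-18 */
#define GET_NUM_REGN(x)		(((x) & 0x300000) >> 20)/* bits 20-21 */

int main(void)
{
	uint32_t cccfg = 0x224005;	/* hypothetical CCCFG readout */

	printf("num_region:  %u\n", BIT(GET_NUM_REGN(cccfg)));	     /* 4 */
	printf("num_channel: %u\n", BIT(GET_NUM_DMACH(cccfg) + 1));  /* 64 */
	printf("num_slot:    %u\n", BIT(GET_NUM_PAENTRY(cccfg) + 4));/* 256 */
	printf("num_tc:      %u\n", GET_NUM_EVQUE(cccfg) + 1);	     /* 3 */
	return 0;
}
```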
* tag 'davinci-for-v3.16/edma' of git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci: (34 commits)
ARM: edma: Remove redundant/unused parameters from edma_soc_info
ARM: davinci: Remove redundant/unused parameters for edma
ARM: dts: am4372: Remove obsolete properties from edma node
ARM: dts: am33xx: Remove obsolete properties from edma node
dt/bindings: ti,edma: Remove redundant properties from documentation
ARM: edma: Get IP configuration from HW (number of channels, tc, etc)
ARM: edma: Save number of regions from pdata to struct edma
ARM: edma: Remove num_cc member from struct edma
ARM: edma: Remove queue_tc_mapping data from edma_soc_info
ARM: davinci: Remove eDMA3 queue_tc_mapping data from edma_soc_info
ARM: edma: Do not change TC -> Queue mapping, leave it to default.
ARM: edma: Take the number of tc from edma_soc_info (pdata)
ARM: edma: No need to clean the pdata in edma_of_parse_dt()
ARM: edma: Clean up and simplify the code around irq request
dmaengine: edma: update DMA memcpy to use new param element
dmaengine: edma: Document variables used for residue accounting
dmaengine: edma: Provide granular accounting
dmaengine: edma: Make reading the position of active channels work
dmaengine: edma: Store transfer data in edma_desc and edma_pset
dmaengine: edma: Create private pset struct
...
Signed-off-by: Olof Johansson <olof@lixom.net>
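The granular residue accounting introduced by the dmaengine commits above walks the programmed PaRAM sets against the live transfer position instead of recomputing descriptor sizes. A simplified model of that walk, reduced to plain integers (the real edma_residue() in the diff below operates on struct edma_pset and dma_addr_t):

```c
/*
 * Simplified model of the SG residue walk added in drivers/dma/edma.c.
 * 'pos' stands for the current src/dst address read back from the
 * active PaRAM slot; the *_stat fields mirror the driver's bookkeeping.
 * The struct names here are illustrative, not the driver's.
 */
#include <stdint.h>

struct pset_model { uint32_t addr; uint32_t len; };

struct desc_model {
	struct pset_model psets[16];
	int processed;		/* psets programmed to the controller */
	int processed_stat;	/* psets already accounted for */
	uint32_t residue_stat;	/* bytes left as of the last accounting */
};

static uint32_t residue_model(struct desc_model *d, uint32_t pos)
{
	for (; d->processed_stat < d->processed; d->processed_stat++) {
		struct pset_model *p = &d->psets[d->processed_stat];

		/* Inside this pset's address window: it is the active one */
		if (pos >= p->addr && pos < p->addr + p->len)
			return d->residue_stat - (pos - p->addr);

		/* Otherwise this pset is done; fold it into the stats */
		d->residue_stat -= p->len;
	}
	return d->residue_stat;
}
```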
-rw-r--r-- | Documentation/devicetree/bindings/dma/ti-edma.txt |  13
-rw-r--r-- | arch/arm/boot/dts/am33xx.dtsi                     |   3
-rw-r--r-- | arch/arm/boot/dts/am4372.dtsi                     |   3
-rw-r--r-- | arch/arm/common/edma.c                            | 197
-rw-r--r-- | arch/arm/mach-davinci/devices-da8xx.c             |  31
-rw-r--r-- | arch/arm/mach-davinci/dm355.c                     |  14
-rw-r--r-- | arch/arm/mach-davinci/dm365.c                     |  16
-rw-r--r-- | arch/arm/mach-davinci/dm644x.c                    |  14
-rw-r--r-- | arch/arm/mach-davinci/dm646x.c                    |  16
-rw-r--r-- | drivers/dma/edma.c                                | 335
-rw-r--r-- | include/linux/platform_data/edma.h                |  28
11 files changed, 373 insertions, 297 deletions
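Since the series adds DMA_MEMCPY capability to drivers/dma/edma.c (diff below), a client can drive the controller through the generic dmaengine API. A hedged sketch of such a client using only standard dmaengine calls; do_memcpy() is a made-up helper name, error handling is trimmed, and per the diff's comment the addresses and length should respect dma->copy_align (4 bytes here). Real code would use a completion callback rather than polling:

```c
#include <linux/dmaengine.h>
#include <linux/errno.h>

static int do_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Grab any channel advertising DMA_MEMCPY */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* Busy-wait for completion; illustrative only */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) !=
	       DMA_COMPLETE)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}
```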
```diff
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index 68ff2137bae7..5ba525a10035 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -2,11 +2,8 @@ TI EDMA
 
 Required properties:
 - compatible : "ti,edma3"
-- ti,edma-regions: Number of regions
-- ti,edma-slots: Number of slots
 - #dma-cells: Should be set to <1>
   Clients should use a single channel number per DMA request.
-- dma-channels: Specify total DMA channels per CC
 - reg: Memory map for accessing module
 - interrupt-parent: Interrupt controller the interrupt is routed through
 - interrupts: Exactly 3 interrupts need to be specified in the order:
@@ -17,6 +14,13 @@ Optional properties:
 - ti,hwmods: Name of the hwmods associated to the EDMA
 - ti,edma-xbar-event-map: Crossbar event to channel map
 
+Deprecated properties:
+Listed here in case one wants to boot an old kernel with new DTB. These
+properties might need to be added to the new DTS files.
+- ti,edma-regions: Number of regions
+- ti,edma-slots: Number of slots
+- dma-channels: Specify total DMA channels per CC
+
 Example:
 
 edma: edma@49000000 {
@@ -26,9 +30,6 @@ edma: edma@49000000 {
 	compatible = "ti,edma3";
 	ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
 	#dma-cells = <1>;
-	dma-channels = <64>;
-	ti,edma-regions = <4>;
-	ti,edma-slots = <256>;
 	ti,edma-xbar-event-map = /bits/ 16 <1 12 2 13>;
 };
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 7ad75b4e0663..99e572c45244 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -147,9 +147,6 @@
 			      <0x44e10f90 0x40>;
 			interrupts = <12 13 14>;
 			#dma-cells = <1>;
-			dma-channels = <64>;
-			ti,edma-regions = <4>;
-			ti,edma-slots = <256>;
 		};
 
 		gpio0: gpio@44e07000 {
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index d1f8707ff1df..befb680e5719 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -108,9 +108,6 @@
 				     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
 			#dma-cells = <1>;
-			dma-channels = <64>;
-			ti,edma-regions = <4>;
-			ti,edma-slots = <256>;
 		};
 
 		uart0: serial@44e09000 {
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 5339009b3c0c..485be42519b9 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -102,7 +102,13 @@
 #define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))
 
 #define EDMA_DCHMAP	0x0100  /* 64 registers */
-#define CHMAP_EXIST	BIT(24)
+
+/* CCCFG register */
+#define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
+#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
+#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
+#define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
+#define CHMAP_EXIST		BIT(24)
 
 #define EDMA_MAX_DMACH           64
 #define EDMA_MAX_PARAMENTRY     512
@@ -233,7 +239,6 @@ struct edma {
 	unsigned	num_region;
 	unsigned	num_slots;
 	unsigned	num_tc;
-	unsigned	num_cc;
 	enum dma_event_q 	default_queue;
 
 	/* list of channels with no even trigger; terminated by "-1" */
@@ -290,12 +295,6 @@ static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
 			~(0x7 << bit), queue_no << bit);
 }
 
-static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
-{
-	int bit = queue_no * 4;
-	edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
-}
-
 static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
 		int priority)
 {
@@ -994,29 +993,23 @@ void edma_set_dest(unsigned slot, dma_addr_t dest_port,
 EXPORT_SYMBOL(edma_set_dest);
 
 /**
- * edma_get_position - returns the current transfer points
+ * edma_get_position - returns the current transfer point
  * @slot: parameter RAM slot being examined
- * @src: pointer to source port position
- * @dst: pointer to destination port position
+ * @dst: true selects the dest position, false the source
  *
- * Returns current source and destination addresses for a particular
- * parameter RAM slot. Its channel should not be active when this is called.
+ * Returns the position of the current active slot
  */
-void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
+dma_addr_t edma_get_position(unsigned slot, bool dst)
 {
-	struct edmacc_param temp;
-	unsigned ctlr;
+	u32 offs, ctlr = EDMA_CTLR(slot);
 
-	ctlr = EDMA_CTLR(slot);
 	slot = EDMA_CHAN_SLOT(slot);
 
-	edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
-	if (src != NULL)
-		*src = temp.src;
-	if (dst != NULL)
-		*dst = temp.dst;
+	offs = PARM_OFFSET(slot);
+	offs += dst ? PARM_DST : PARM_SRC;
+
+	return edma_read(ctlr, offs);
 }
-EXPORT_SYMBOL(edma_get_position);
 
 /**
  * edma_set_src_index - configure DMA source address indexing
@@ -1421,6 +1414,67 @@ void edma_clear_event(unsigned channel)
 }
 EXPORT_SYMBOL(edma_clear_event);
 
+static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
+			      struct edma *edma_cc)
+{
+	int i;
+	u32 value, cccfg;
+	s8 (*queue_priority_map)[2];
+
+	/* Decode the eDMA3 configuration from CCCFG register */
+	cccfg = edma_read(0, EDMA_CCCFG);
+
+	value = GET_NUM_REGN(cccfg);
+	edma_cc->num_region = BIT(value);
+
+	value = GET_NUM_DMACH(cccfg);
+	edma_cc->num_channels = BIT(value + 1);
+
+	value = GET_NUM_PAENTRY(cccfg);
+	edma_cc->num_slots = BIT(value + 4);
+
+	value = GET_NUM_EVQUE(cccfg);
+	edma_cc->num_tc = value + 1;
+
+	dev_dbg(dev, "eDMA3 HW configuration (cccfg: 0x%08x):\n", cccfg);
+	dev_dbg(dev, "num_region: %u\n", edma_cc->num_region);
+	dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels);
+	dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots);
+	dev_dbg(dev, "num_tc: %u\n", edma_cc->num_tc);
+
+	/* Nothing need to be done if queue priority is provided */
+	if (pdata->queue_priority_mapping)
+		return 0;
+
+	/*
+	 * Configure TC/queue priority as follows:
+	 * Q0 - priority 0
+	 * Q1 - priority 1
+	 * Q2 - priority 2
+	 * ...
+	 * The meaning of priority numbers: 0 highest priority, 7 lowest
+	 * priority. So Q0 is the highest priority queue and the last queue has
+	 * the lowest priority.
+	 */
+	queue_priority_map = devm_kzalloc(dev,
+					  (edma_cc->num_tc + 1) * sizeof(s8),
+					  GFP_KERNEL);
+	if (!queue_priority_map)
+		return -ENOMEM;
+
+	for (i = 0; i < edma_cc->num_tc; i++) {
+		queue_priority_map[i][0] = i;
+		queue_priority_map[i][1] = i;
+	}
+	queue_priority_map[i][0] = -1;
+	queue_priority_map[i][1] = -1;
+
+	pdata->queue_priority_mapping = queue_priority_map;
+	pdata->default_queue = 0;
+
+	return 0;
+}
+
 #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES)
 
 static int edma_xbar_event_map(struct device *dev, struct device_node *node,
@@ -1471,65 +1525,16 @@ static int edma_of_parse_dt(struct device *dev,
 			    struct device_node *node,
 			    struct edma_soc_info *pdata)
 {
-	int ret = 0, i;
-	u32 value;
+	int ret = 0;
 	struct property *prop;
 	size_t sz;
 	struct edma_rsv_info *rsv_info;
-	s8 (*queue_tc_map)[2], (*queue_priority_map)[2];
-
-	memset(pdata, 0, sizeof(struct edma_soc_info));
-
-	ret = of_property_read_u32(node, "dma-channels", &value);
-	if (ret < 0)
-		return ret;
-	pdata->n_channel = value;
-
-	ret = of_property_read_u32(node, "ti,edma-regions", &value);
-	if (ret < 0)
-		return ret;
-	pdata->n_region = value;
-
-	ret = of_property_read_u32(node, "ti,edma-slots", &value);
-	if (ret < 0)
-		return ret;
-	pdata->n_slot = value;
-
-	pdata->n_cc = 1;
 
 	rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
 	if (!rsv_info)
 		return -ENOMEM;
 	pdata->rsv = rsv_info;
 
-	queue_tc_map = devm_kzalloc(dev, 8*sizeof(s8), GFP_KERNEL);
-	if (!queue_tc_map)
-		return -ENOMEM;
-
-	for (i = 0; i < 3; i++) {
-		queue_tc_map[i][0] = i;
-		queue_tc_map[i][1] = i;
-	}
-	queue_tc_map[i][0] = -1;
-	queue_tc_map[i][1] = -1;
-
-	pdata->queue_tc_mapping = queue_tc_map;
-
-	queue_priority_map = devm_kzalloc(dev, 8*sizeof(s8), GFP_KERNEL);
-	if (!queue_priority_map)
-		return -ENOMEM;
-
-	for (i = 0; i < 3; i++) {
-		queue_priority_map[i][0] = i;
-		queue_priority_map[i][1] = i;
-	}
-	queue_priority_map[i][0] = -1;
-	queue_priority_map[i][1] = -1;
-
-	pdata->queue_priority_mapping = queue_priority_map;
-
-	pdata->default_queue = 0;
-
 	prop = of_find_property(node, "ti,edma-xbar-event-map", &sz);
 	if (prop)
 		ret = edma_xbar_event_map(dev, node, pdata, sz);
@@ -1556,6 +1561,7 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
 		return ERR_PTR(ret);
 
 	dma_cap_set(DMA_SLAVE, edma_filter_info.dma_cap);
+	dma_cap_set(DMA_CYCLIC, edma_filter_info.dma_cap);
 	of_dma_controller_register(dev->of_node, of_dma_simple_xlate,
 				   &edma_filter_info);
 
@@ -1574,7 +1580,6 @@ static int edma_probe(struct platform_device *pdev)
 	struct edma_soc_info	**info = pdev->dev.platform_data;
 	struct edma_soc_info    *ninfo[EDMA_MAX_CC] = {NULL};
 	s8		(*queue_priority_mapping)[2];
-	s8		(*queue_tc_mapping)[2];
 	int			i, j, off, ln, found = 0;
 	int			status = -1;
 	const s16	(*rsv_chans)[2];
@@ -1585,7 +1590,6 @@ static int edma_probe(struct platform_device *pdev)
 	struct resource		*r[EDMA_MAX_CC] = {NULL};
 	struct resource		res[EDMA_MAX_CC];
 	char			res_name[10];
-	char			irq_name[10];
 	struct device_node	*node = pdev->dev.of_node;
 	struct device		*dev = &pdev->dev;
 	int			ret;
@@ -1650,12 +1654,10 @@ static int edma_probe(struct platform_device *pdev)
 		if (!edma_cc[j])
 			return -ENOMEM;
 
-		edma_cc[j]->num_channels = min_t(unsigned, info[j]->n_channel,
-							EDMA_MAX_DMACH);
-		edma_cc[j]->num_slots = min_t(unsigned, info[j]->n_slot,
-							EDMA_MAX_PARAMENTRY);
-		edma_cc[j]->num_cc = min_t(unsigned, info[j]->n_cc,
-							EDMA_MAX_CC);
+		/* Get eDMA3 configuration from IP */
+		ret = edma_setup_from_hw(dev, info[j], edma_cc[j]);
+		if (ret)
+			return ret;
 
 		edma_cc[j]->default_queue = info[j]->default_queue;
 
@@ -1707,14 +1709,21 @@ static int edma_probe(struct platform_device *pdev)
 
 		if (node) {
 			irq[j] = irq_of_parse_and_map(node, 0);
+			err_irq[j] = irq_of_parse_and_map(node, 2);
 		} else {
+			char irq_name[10];
+
 			sprintf(irq_name, "edma%d", j);
 			irq[j] = platform_get_irq_byname(pdev, irq_name);
+
+			sprintf(irq_name, "edma%d_err", j);
+			err_irq[j] = platform_get_irq_byname(pdev, irq_name);
 		}
 		edma_cc[j]->irq_res_start = irq[j];
-		status = devm_request_irq(&pdev->dev, irq[j],
-					  dma_irq_handler, 0, "edma",
-					  &pdev->dev);
+		edma_cc[j]->irq_res_end = err_irq[j];
+
+		status = devm_request_irq(dev, irq[j], dma_irq_handler, 0,
+					  "edma", dev);
 		if (status < 0) {
 			dev_dbg(&pdev->dev,
 				"devm_request_irq %d failed --> %d\n",
@@ -1722,16 +1731,8 @@ static int edma_probe(struct platform_device *pdev)
 			return status;
 		}
 
-		if (node) {
-			err_irq[j] = irq_of_parse_and_map(node, 2);
-		} else {
-			sprintf(irq_name, "edma%d_err", j);
-			err_irq[j] = platform_get_irq_byname(pdev, irq_name);
-		}
-		edma_cc[j]->irq_res_end = err_irq[j];
-		status = devm_request_irq(&pdev->dev, err_irq[j],
-					  dma_ccerr_handler, 0,
-					  "edma_error", &pdev->dev);
+		status = devm_request_irq(dev, err_irq[j], dma_ccerr_handler, 0,
+					  "edma_error", dev);
 		if (status < 0) {
 			dev_dbg(&pdev->dev,
 				"devm_request_irq %d failed --> %d\n",
@@ -1742,14 +1743,8 @@ static int edma_probe(struct platform_device *pdev)
 		for (i = 0; i < edma_cc[j]->num_channels; i++)
 			map_dmach_queue(j, i, info[j]->default_queue);
 
-		queue_tc_mapping = info[j]->queue_tc_mapping;
 		queue_priority_mapping = info[j]->queue_priority_mapping;
 
-		/* Event queue to TC mapping */
-		for (i = 0; queue_tc_mapping[i][0] != -1; i++)
-			map_queue_tc(j, queue_tc_mapping[i][0],
-					queue_tc_mapping[i][1]);
-
 		/* Event queue priority mapping */
 		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
 			assign_priority_to_queue(j,
@@ -1762,7 +1757,7 @@ static int edma_probe(struct platform_device *pdev)
 		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
 			map_dmach_param(j);
 
-		for (i = 0; i < info[j]->n_region; i++) {
+		for (i = 0; i < edma_cc[j]->num_region; i++) {
 			edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
 			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
 			edma_write_array(j, EDMA_QRAE, i, 0x0);
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 56ea41d5f849..b85b781b05fd 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -134,13 +134,6 @@ struct platform_device da8xx_serial_device[] = {
 	}
 };
 
-static s8 da8xx_queue_tc_mapping[][2] = {
-	/* {event queue no, TC no} */
-	{0, 0},
-	{1, 1},
-	{-1, -1}
-};
-
 static s8 da8xx_queue_priority_mapping[][2] = {
 	/* {event queue no, Priority} */
 	{0, 3},
@@ -148,12 +141,6 @@ static s8 da8xx_queue_priority_mapping[][2] = {
 	{-1, -1}
 };
 
-static s8 da850_queue_tc_mapping[][2] = {
-	/* {event queue no, TC no} */
-	{0, 0},
-	{-1, -1}
-};
-
 static s8 da850_queue_priority_mapping[][2] = {
 	/* {event queue no, Priority} */
 	{0, 3},
@@ -161,12 +148,6 @@ static s8 da850_queue_priority_mapping[][2] = {
 };
 
 static struct edma_soc_info da830_edma_cc0_info = {
-	.n_channel		= 32,
-	.n_region		= 4,
-	.n_slot			= 128,
-	.n_tc			= 2,
-	.n_cc			= 1,
-	.queue_tc_mapping	= da8xx_queue_tc_mapping,
 	.queue_priority_mapping	= da8xx_queue_priority_mapping,
 	.default_queue		= EVENTQ_1,
 };
@@ -177,22 +158,10 @@ static struct edma_soc_info *da830_edma_info[EDMA_MAX_CC] = {
 
 static struct edma_soc_info da850_edma_cc_info[] = {
 	{
-		.n_channel		= 32,
-		.n_region		= 4,
-		.n_slot			= 128,
-		.n_tc			= 2,
-		.n_cc			= 1,
-		.queue_tc_mapping	= da8xx_queue_tc_mapping,
 		.queue_priority_mapping	= da8xx_queue_priority_mapping,
 		.default_queue		= EVENTQ_1,
 	},
 	{
-		.n_channel		= 32,
-		.n_region		= 4,
-		.n_slot			= 128,
-		.n_tc			= 1,
-		.n_cc			= 1,
-		.queue_tc_mapping	= da850_queue_tc_mapping,
 		.queue_priority_mapping	= da850_queue_priority_mapping,
 		.default_queue		= EVENTQ_0,
 	},
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 07381d8cea62..2f3ed3a58d57 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -569,14 +569,6 @@ static u8 dm355_default_priorities[DAVINCI_N_AINTC_IRQ] = {
 /*----------------------------------------------------------------------*/
 
 static s8
-queue_tc_mapping[][2] = {
-	/* {event queue no, TC no} */
-	{0, 0},
-	{1, 1},
-	{-1, -1},
-};
-
-static s8
 queue_priority_mapping[][2] = {
 	/* {event queue no, Priority} */
 	{0, 3},
@@ -585,12 +577,6 @@ queue_priority_mapping[][2] = {
 };
 
 static struct edma_soc_info edma_cc0_info = {
-	.n_channel		= 64,
-	.n_region		= 4,
-	.n_slot			= 128,
-	.n_tc			= 2,
-	.n_cc			= 1,
-	.queue_tc_mapping	= queue_tc_mapping,
 	.queue_priority_mapping	= queue_priority_mapping,
 	.default_queue		= EVENTQ_1,
 };
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 08a61b938333..0ae8114f5cc9 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -853,16 +853,6 @@ static u8 dm365_default_priorities[DAVINCI_N_AINTC_IRQ] = {
 
 /* Four Transfer Controllers on DM365 */
 static s8
-dm365_queue_tc_mapping[][2] = {
-	/* {event queue no, TC no} */
-	{0, 0},
-	{1, 1},
-	{2, 2},
-	{3, 3},
-	{-1, -1},
-};
-
-static s8
 dm365_queue_priority_mapping[][2] = {
 	/* {event queue no, Priority} */
 	{0, 7},
@@ -873,12 +863,6 @@ dm365_queue_priority_mapping[][2] = {
 };
 
 static struct edma_soc_info edma_cc0_info = {
-	.n_channel		= 64,
-	.n_region		= 4,
-	.n_slot			= 256,
-	.n_tc			= 4,
-	.n_cc			= 1,
-	.queue_tc_mapping	= dm365_queue_tc_mapping,
 	.queue_priority_mapping	= dm365_queue_priority_mapping,
 	.default_queue		= EVENTQ_3,
 };
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 5debffba4b24..dc52657909c4 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -499,14 +499,6 @@ static u8 dm644x_default_priorities[DAVINCI_N_AINTC_IRQ] = {
 /*----------------------------------------------------------------------*/
 
 static s8
-queue_tc_mapping[][2] = {
-	/* {event queue no, TC no} */
-	{0, 0},
-	{1, 1},
-	{-1, -1},
-};
-
-static s8
 queue_priority_mapping[][2] = {
 	/* {event queue no, Priority} */
 	{0, 3},
@@ -515,12 +507,6 @@ queue_priority_mapping[][2] = {
 };
 
 static struct edma_soc_info edma_cc0_info = {
-	.n_channel		= 64,
-	.n_region		= 4,
-	.n_slot			= 128,
-	.n_tc			= 2,
-	.n_cc			= 1,
-	.queue_tc_mapping	= queue_tc_mapping,
 	.queue_priority_mapping	= queue_priority_mapping,
 	.default_queue		= EVENTQ_1,
 };
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 332d00d24dc2..6c3bbea7d77d 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -533,16 +533,6 @@ static u8 dm646x_default_priorities[DAVINCI_N_AINTC_IRQ] = {
 
 /* Four Transfer Controllers on DM646x */
 static s8
-dm646x_queue_tc_mapping[][2] = {
-	/* {event queue no, TC no} */
-	{0, 0},
-	{1, 1},
-	{2, 2},
-	{3, 3},
-	{-1, -1},
-};
-
-static s8
 dm646x_queue_priority_mapping[][2] = {
 	/* {event queue no, Priority} */
 	{0, 4},
@@ -553,12 +543,6 @@ dm646x_queue_priority_mapping[][2] = {
 };
 
 static struct edma_soc_info edma_cc0_info = {
-	.n_channel		= 64,
-	.n_region		= 6,	/* 0-1, 4-7 */
-	.n_slot			= 512,
-	.n_tc			= 4,
-	.n_cc			= 1,
-	.queue_tc_mapping	= dm646x_queue_tc_mapping,
 	.queue_priority_mapping	= dm646x_queue_priority_mapping,
 	.default_queue		= EVENTQ_1,
 };
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 926360c2db6a..d08c4dedef35 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -57,14 +57,48 @@
 #define EDMA_MAX_SLOTS		MAX_NR_SG
 #define EDMA_DESCRIPTORS	16
 
+struct edma_pset {
+	u32				len;
+	dma_addr_t			addr;
+	struct edmacc_param		param;
+};
+
 struct edma_desc {
 	struct virt_dma_desc		vdesc;
 	struct list_head		node;
+	enum dma_transfer_direction	direction;
 	int				cyclic;
 	int				absync;
 	int				pset_nr;
+	struct edma_chan		*echan;
 	int				processed;
-	struct edmacc_param		pset[0];
+
+	/*
+	 * The following 4 elements are used for residue accounting.
+	 *
+	 * - processed_stat: the number of SG elements we have traversed
+	 * so far to cover accounting. This is updated directly to processed
+	 * during edma_callback and is always <= processed, because processed
+	 * refers to the number of pending transfer (programmed to EDMA
+	 * controller), where as processed_stat tracks number of transfers
+	 * accounted for so far.
+	 *
+	 * - residue: The amount of bytes we have left to transfer for this desc
+	 *
+	 * - residue_stat: The residue in bytes of data we have covered
+	 * so far for accounting. This is updated directly to residue
+	 * during callbacks to keep it current.
+	 *
+	 * - sg_len: Tracks the length of the current intermediate transfer,
+	 * this is required to update the residue during intermediate transfer
+	 * completion callback.
+	 */
+	int				processed_stat;
+	u32				sg_len;
+	u32				residue;
+	u32				residue_stat;
+
+	struct edma_pset		pset[0];
 };
 
 struct edma_cc;
@@ -136,12 +170,14 @@ static void edma_execute(struct edma_chan *echan)
 	/* Find out how many left */
 	left = edesc->pset_nr - edesc->processed;
 	nslots = min(MAX_NR_SG, left);
+	edesc->sg_len = 0;
 
 	/* Write descriptor PaRAM set(s) */
 	for (i = 0; i < nslots; i++) {
 		j = i + edesc->processed;
-		edma_write_slot(echan->slot[i], &edesc->pset[j]);
-		dev_dbg(echan->vchan.chan.device->dev,
+		edma_write_slot(echan->slot[i], &edesc->pset[j].param);
+		edesc->sg_len += edesc->pset[j].len;
+		dev_vdbg(echan->vchan.chan.device->dev,
 			"\n pset[%d]:\n"
 			"  chnum\t%d\n"
 			"  slot\t%d\n"
@@ -154,14 +190,14 @@ static void edma_execute(struct edma_chan *echan)
 			"  cidx\t%08x\n"
 			"  lkrld\t%08x\n",
 			j, echan->ch_num, echan->slot[i],
-			edesc->pset[j].opt,
-			edesc->pset[j].src,
-			edesc->pset[j].dst,
-			edesc->pset[j].a_b_cnt,
-			edesc->pset[j].ccnt,
-			edesc->pset[j].src_dst_bidx,
-			edesc->pset[j].src_dst_cidx,
-			edesc->pset[j].link_bcntrld);
+			edesc->pset[j].param.opt,
+			edesc->pset[j].param.src,
+			edesc->pset[j].param.dst,
+			edesc->pset[j].param.a_b_cnt,
+			edesc->pset[j].param.ccnt,
+			edesc->pset[j].param.src_dst_bidx,
+			edesc->pset[j].param.src_dst_cidx,
+			edesc->pset[j].param.link_bcntrld);
 		/* Link to the previous slot if not the last set */
 		if (i != (nslots - 1))
 			edma_link(echan->slot[i], echan->slot[i+1]);
@@ -183,7 +219,8 @@ static void edma_execute(struct edma_chan *echan)
 	}
 
 	if (edesc->processed <= MAX_NR_SG) {
-		dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
+		dev_dbg(dev, "first transfer starting on channel %d\n",
+			echan->ch_num);
 		edma_start(echan->ch_num);
 	} else {
 		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
@@ -197,7 +234,7 @@ static void edma_execute(struct edma_chan *echan)
 	 * MAX_NR_SG
 	 */
 	if (echan->missed) {
-		dev_dbg(dev, "missed event in execute detected\n");
+		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
 		edma_clean_channel(echan->ch_num);
 		edma_stop(echan->ch_num);
 		edma_start(echan->ch_num);
@@ -242,6 +279,26 @@ static int edma_slave_config(struct edma_chan *echan,
 	return 0;
 }
 
+static int edma_dma_pause(struct edma_chan *echan)
+{
+	/* Pause/Resume only allowed with cyclic mode */
+	if (!echan->edesc->cyclic)
+		return -EINVAL;
+
+	edma_pause(echan->ch_num);
+	return 0;
+}
+
+static int edma_dma_resume(struct edma_chan *echan)
+{
+	/* Pause/Resume only allowed with cyclic mode */
+	if (!echan->edesc->cyclic)
+		return -EINVAL;
+
+	edma_resume(echan->ch_num);
+	return 0;
+}
+
 static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 			unsigned long arg)
 {
@@ -257,6 +314,14 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		config = (struct dma_slave_config *)arg;
 		ret = edma_slave_config(echan, config);
 		break;
+	case DMA_PAUSE:
+		ret = edma_dma_pause(echan);
+		break;
+
+	case DMA_RESUME:
+		ret = edma_dma_resume(echan);
+		break;
+
 	default:
 		ret = -ENOSYS;
 	}
@@ -275,18 +340,23 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
  * @dma_length: Total length of the DMA transfer
  * @direction: Direction of the transfer
  */
-static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
 			    dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
 			    enum dma_slave_buswidth dev_width,
 			    unsigned int dma_length,
 			    enum dma_transfer_direction direction)
 {
 	struct edma_chan *echan = to_edma_chan(chan);
 	struct device *dev = chan->device->dev;
+	struct edmacc_param *param = &epset->param;
 	int acnt, bcnt, ccnt, cidx;
 	int src_bidx, dst_bidx, src_cidx, dst_cidx;
 	int absync;
 
 	acnt = dev_width;
+
+	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
+	if (!burst)
+		burst = 1;
 	/*
 	 * If the maxburst is equal to the fifo width, use
 	 * A-synced transfers. This allows for large contiguous
@@ -337,41 +407,50 @@ static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
 		cidx = acnt * bcnt;
 	}
 
+	epset->len = dma_length;
+
 	if (direction == DMA_MEM_TO_DEV) {
 		src_bidx = acnt;
 		src_cidx = cidx;
 		dst_bidx = 0;
 		dst_cidx = 0;
+		epset->addr = src_addr;
 	} else if (direction == DMA_DEV_TO_MEM)  {
 		src_bidx = 0;
 		src_cidx = 0;
 		dst_bidx = acnt;
 		dst_cidx = cidx;
+		epset->addr = dst_addr;
+	} else if (direction == DMA_MEM_TO_MEM)  {
+		src_bidx = acnt;
+		src_cidx = cidx;
+		dst_bidx = acnt;
+		dst_cidx = cidx;
 	} else {
 		dev_err(dev, "%s: direction not implemented yet\n", __func__);
 		return -EINVAL;
 	}
 
-	pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
 	/* Configure A or AB synchronized transfers */
 	if (absync)
-		pset->opt |= SYNCDIM;
+		param->opt |= SYNCDIM;
 
-	pset->src = src_addr;
-	pset->dst = dst_addr;
+	param->src = src_addr;
+	param->dst = dst_addr;
 
-	pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
-	pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;
 
-	pset->a_b_cnt = bcnt << 16 | acnt;
-	pset->ccnt = ccnt;
+	param->a_b_cnt = bcnt << 16 | acnt;
+	param->ccnt = ccnt;
 	/*
 	 * Only time when (bcntrld) auto reload is required is for
 	 * A-sync case, and in this case, a requirement of reload value
 	 * of SZ_64K-1 only is assured. 'link' is initially set to NULL
 	 * and then later will be populated by edma_execute.
 	 */
-	pset->link_bcntrld = 0xffffffff;
+	param->link_bcntrld = 0xffffffff;
 	return absync;
 }
 
@@ -401,23 +480,26 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 		dev_width = echan->cfg.dst_addr_width;
 		burst = echan->cfg.dst_maxburst;
 	} else {
-		dev_err(dev, "%s: bad direction?\n", __func__);
+		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
 		return NULL;
 	}
 
 	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
-		dev_err(dev, "Undefined slave buswidth\n");
+		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
 		return NULL;
 	}
 
 	edesc = kzalloc(sizeof(*edesc) + sg_len *
 		sizeof(edesc->pset[0]), GFP_ATOMIC);
 	if (!edesc) {
-		dev_dbg(dev, "Failed to allocate a descriptor\n");
+		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
 		return NULL;
 	}
 
 	edesc->pset_nr = sg_len;
+	edesc->residue = 0;
+	edesc->direction = direction;
+	edesc->echan = echan;
 
 	/* Allocate a PaRAM slot, if needed */
 	nslots = min_t(unsigned, MAX_NR_SG, sg_len);
@@ -429,7 +511,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 						EDMA_SLOT_ANY);
 			if (echan->slot[i] < 0) {
 				kfree(edesc);
-				dev_err(dev, "Failed to allocate slot\n");
+				dev_err(dev, "%s: Failed to allocate slot\n",
+					__func__);
 				return NULL;
 			}
 		}
@@ -452,16 +535,56 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 		}
 
 		edesc->absync = ret;
+		edesc->residue += sg_dma_len(sg);
 
 		/* If this is the last in a current SG set of transactions,
 		   enable interrupts so that next set is processed */
 		if (!((i+1) % MAX_NR_SG))
-			edesc->pset[i].opt |= TCINTEN;
+			edesc->pset[i].param.opt |= TCINTEN;
 
 		/* If this is the last set, enable completion interrupt flag */
 		if (i == sg_len - 1)
-			edesc->pset[i].opt |= TCINTEN;
+			edesc->pset[i].param.opt |= TCINTEN;
 	}
+	edesc->residue_stat = edesc->residue;
+
+	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
+struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
+	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+	size_t len, unsigned long tx_flags)
+{
+	int ret;
+	struct edma_desc *edesc;
+	struct device *dev = chan->device->dev;
+	struct edma_chan *echan = to_edma_chan(chan);
+
+	if (unlikely(!echan || !len))
+		return NULL;
+
+	edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
+	if (!edesc) {
+		dev_dbg(dev, "Failed to allocate a descriptor\n");
+		return NULL;
+	}
+
+	edesc->pset_nr = 1;
+
+	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
+			       DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
+	if (ret < 0)
+		return NULL;
+
+	edesc->absync = ret;
+
+	/*
+	 * Enable intermediate transfer chaining to re-trigger channel
+	 * on completion of every TR, and enable transfer-completion
+	 * interrupt on completion of the whole transfer.
+	 */
+	edesc->pset[0].param.opt |= ITCCHEN;
+	edesc->pset[0].param.opt |= TCINTEN;
 
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
 }
@@ -493,12 +616,12 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 		dev_width = echan->cfg.dst_addr_width;
 		burst = echan->cfg.dst_maxburst;
 	} else {
-		dev_err(dev, "%s: bad direction?\n", __func__);
+		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
 		return NULL;
 	}
 
 	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
-		dev_err(dev, "Undefined slave buswidth\n");
+		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
 		return NULL;
 	}
 
@@ -523,16 +646,18 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 	edesc = kzalloc(sizeof(*edesc) + nslots *
 		sizeof(edesc->pset[0]), GFP_ATOMIC);
 	if (!edesc) {
-		dev_dbg(dev, "Failed to allocate a descriptor\n");
+		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
 		return NULL;
 	}
 
 	edesc->cyclic = 1;
 	edesc->pset_nr = nslots;
+	edesc->residue = edesc->residue_stat = buf_len;
+	edesc->direction = direction;
+	edesc->echan = echan;
 
-	dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots);
-	dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len);
-	dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len);
+	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
+		__func__, echan->ch_num, nslots, period_len, buf_len);
 
 	for (i = 0; i < nslots; i++) {
 		/* Allocate a PaRAM slot, if needed */
@@ -542,7 +667,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 						EDMA_SLOT_ANY);
 			if (echan->slot[i] < 0) {
 				kfree(edesc);
-				dev_err(dev, "Failed to allocate slot\n");
+				dev_err(dev, "%s: Failed to allocate slot\n",
+					__func__);
 				return NULL;
 			}
 		}
@@ -566,8 +692,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 		else
 			src_addr += period_len;
 
-		dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
-		dev_dbg(dev,
+		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
+		dev_vdbg(dev,
 			"\n pset[%d]:\n"
 			"  chnum\t%d\n"
 			"  slot\t%d\n"
@@ -580,14 +706,14 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 			"  cidx\t%08x\n"
 			"  lkrld\t%08x\n",
 			i, echan->ch_num, echan->slot[i],
-			edesc->pset[i].opt,
-			edesc->pset[i].src,
-			edesc->pset[i].dst,
-			edesc->pset[i].a_b_cnt,
-			edesc->pset[i].ccnt,
-			edesc->pset[i].src_dst_bidx,
-			edesc->pset[i].src_dst_cidx,
-			edesc->pset[i].link_bcntrld);
+			edesc->pset[i].param.opt,
+			edesc->pset[i].param.src,
+			edesc->pset[i].param.dst,
+			edesc->pset[i].param.a_b_cnt,
+			edesc->pset[i].param.ccnt,
+			edesc->pset[i].param.src_dst_bidx,
+			edesc->pset[i].param.src_dst_cidx,
+			edesc->pset[i].param.link_bcntrld);
 
 		edesc->absync = ret;
 
@@ -595,7 +721,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 		 * Enable interrupts for every period because callback
 		 * has to be called for every period.
 		 */
-		edesc->pset[i].opt |= TCINTEN;
+		edesc->pset[i].param.opt |= TCINTEN;
 	}
 
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
@@ -606,7 +732,6 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
 	struct edma_chan *echan = data;
 	struct device *dev = echan->vchan.chan.device->dev;
 	struct edma_desc *edesc;
-	unsigned long flags;
 	struct edmacc_param p;
 
 	edesc = echan->edesc;
@@ -617,27 +742,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
 
 	switch (ch_status) {
 	case EDMA_DMA_COMPLETE:
-		spin_lock_irqsave(&echan->vchan.lock, flags);
+		spin_lock(&echan->vchan.lock);
 
 		if (edesc) {
 			if (edesc->cyclic) {
 				vchan_cyclic_callback(&edesc->vdesc);
 			} else if (edesc->processed == edesc->pset_nr) {
 				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
+				edesc->residue = 0;
 				edma_stop(echan->ch_num);
 				vchan_cookie_complete(&edesc->vdesc);
 				edma_execute(echan);
 			} else {
 				dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
+
+				/* Update statistics for tx_status */
+				edesc->residue -= edesc->sg_len;
+				edesc->residue_stat = edesc->residue;
+				edesc->processed_stat = edesc->processed;
+
 				edma_execute(echan);
 			}
 		}
 
-		spin_unlock_irqrestore(&echan->vchan.lock, flags);
+		spin_unlock(&echan->vchan.lock);
 
 		break;
 	case EDMA_DMA_CC_ERROR:
-		spin_lock_irqsave(&echan->vchan.lock, flags);
+		spin_lock(&echan->vchan.lock);
 
 		edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
 
@@ -668,7 +800,7 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
 			edma_trigger_channel(echan->ch_num);
 		}
 
-		spin_unlock_irqrestore(&echan->vchan.lock, flags);
+		spin_unlock(&echan->vchan.lock);
 
 		break;
 	default:
@@ -704,7 +836,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
 	echan->alloced = true;
 	echan->slot[0] = echan->ch_num;
 
-	dev_dbg(dev, "allocated channel for %u:%u\n",
+	dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
 		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
 
 	return 0;
@@ -756,23 +888,52 @@ static void edma_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&echan->vchan.lock, flags);
 }
 
-static size_t edma_desc_size(struct edma_desc *edesc)
+static u32 edma_residue(struct edma_desc *edesc)
 {
+	bool dst = edesc->direction == DMA_DEV_TO_MEM;
+	struct edma_pset *pset = edesc->pset;
+	dma_addr_t done, pos;
 	int i;
-	size_t size;
-
-	if (edesc->absync)
-		for (size = i = 0; i < edesc->pset_nr; i++)
-			size += (edesc->pset[i].a_b_cnt & 0xffff) *
-				(edesc->pset[i].a_b_cnt >> 16) *
-				edesc->pset[i].ccnt;
-	else
-		size = (edesc->pset[0].a_b_cnt & 0xffff) *
-			(edesc->pset[0].a_b_cnt >> 16) +
-			(edesc->pset[0].a_b_cnt & 0xffff) *
-			(SZ_64K - 1) * edesc->pset[0].ccnt;
-
-	return size;
+
+	/*
+	 * We always read the dst/src position from the first RamPar
+	 * pset. That's the one which is active now.
+	 */
+	pos = edma_get_position(edesc->echan->slot[0], dst);
+
+	/*
+	 * Cyclic is simple. Just subtract pset[0].addr from pos.
+	 *
+	 * We never update edesc->residue in the cyclic case, so we
+	 * can tell the remaining room to the end of the circular
+	 * buffer.
+	 */
+	if (edesc->cyclic) {
+		done = pos - pset->addr;
+		edesc->residue_stat = edesc->residue - done;
+		return edesc->residue_stat;
+	}
+
+	/*
+	 * For SG operation we catch up with the last processed
+	 * status.
+	 */
+	pset += edesc->processed_stat;
+
+	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
+		/*
+		 * If we are inside this pset address range, we know
+		 * this is the active one. Get the current delta and
+		 * stop walking the psets.
+		 */
+		if (pos >= pset->addr && pos < pset->addr + pset->len)
+			return edesc->residue_stat - (pos - pset->addr);
+
+		/* Otherwise mark it done and update residue_stat. */
+		edesc->processed_stat++;
+		edesc->residue_stat -= pset->len;
+	}
+	return edesc->residue_stat;
 }
 
 /* Check request completion status */
@@ -790,13 +951,10 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
 		return ret;
 
 	spin_lock_irqsave(&echan->vchan.lock, flags);
-	vdesc = vchan_find_desc(&echan->vchan, cookie);
-	if (vdesc) {
-		txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
-	} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
-		struct edma_desc *edesc = echan->edesc;
-		txstate->residue = edma_desc_size(edesc);
-	}
+	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
+		txstate->residue = edma_residue(echan->edesc);
+	else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
+		txstate->residue = to_edma_desc(&vdesc->tx)->residue;
 	spin_unlock_irqrestore(&echan->vchan.lock, flags);
 
 	return ret;
@@ -822,18 +980,43 @@ static void __init edma_chan_init(struct edma_cc *ecc,
 	}
 }
 
+#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static int edma_dma_device_slave_caps(struct dma_chan *dchan,
+				      struct dma_slave_caps *caps)
+{
+	caps->src_addr_widths = EDMA_DMA_BUSWIDTHS;
+	caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS;
+	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	caps->cmd_pause = true;
+	caps->cmd_terminate = true;
+	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+	return 0;
+}
+
 static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
 			  struct device *dev)
 {
 	dma->device_prep_slave_sg = edma_prep_slave_sg;
 	dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
+	dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
 	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
 	dma->device_free_chan_resources = edma_free_chan_resources;
 	dma->device_issue_pending = edma_issue_pending;
 	dma->device_tx_status = edma_tx_status;
 	dma->device_control = edma_control;
+	dma->device_slave_caps = edma_dma_device_slave_caps;
 	dma->dev = dev;
 
+	/*
+	 * code using dma memcpy must make sure alignment of
+	 * length is at dma->copy_align boundary.
+	 */
+	dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
 	INIT_LIST_HEAD(&dma->channels);
 }
 
@@ -861,6 +1044,8 @@ static int edma_probe(struct platform_device *pdev)
 
 	dma_cap_zero(ecc->dma_slave.cap_mask);
 	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
+	dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
+	dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);
 
 	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);
 
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index f50821cb64be..eb8d5627d080 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -43,15 +43,15 @@
 
 /* PaRAM slots are laid out like this */
 struct edmacc_param {
-	unsigned int opt;
-	unsigned int src;
-	unsigned int a_b_cnt;
-	unsigned int dst;
-	unsigned int src_dst_bidx;
-	unsigned int link_bcntrld;
-	unsigned int src_dst_cidx;
-	unsigned int ccnt;
-};
+	u32 opt;
+	u32 src;
+	u32 a_b_cnt;
+	u32 dst;
+	u32 src_dst_bidx;
+	u32 link_bcntrld;
+	u32 src_dst_cidx;
+	u32 ccnt;
+} __packed;
 
 /* fields in edmacc_param.opt */
 #define SAM		BIT(0)
@@ -130,7 +130,7 @@ void edma_set_src(unsigned slot, dma_addr_t src_port,
 		enum address_mode mode, enum fifo_width);
 void edma_set_dest(unsigned slot, dma_addr_t dest_port,
 		enum address_mode mode, enum fifo_width);
-void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst);
+dma_addr_t edma_get_position(unsigned slot, bool dst);
 void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx);
 void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx);
 void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt,
@@ -158,13 +158,6 @@ struct edma_rsv_info {
 
 /* platform_data for EDMA driver */
 struct edma_soc_info {
-
-	/* how many dma resources of each type */
-	unsigned	n_channel;
-	unsigned	n_region;
-	unsigned	n_slot;
-	unsigned	n_tc;
-	unsigned	n_cc;
 	/*
 	 * Default queue is expected to be a low-priority queue.
 	 * This way, long transfers on the default queue started
@@ -175,7 +168,6 @@ struct edma_soc_info {
 
 	/* Resource reservation for other cores */
 	struct edma_rsv_info	*rsv;
 
-	s8	(*queue_tc_mapping)[2];
 	s8	(*queue_priority_mapping)[2];
 	const s16	(*xbar_chans)[2];
 };
```
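Finally, a short sketch of how a cyclic client (e.g. audio) might exercise the new pause/resume paths and the granular residue reporting through the standard dmaengine wrappers. chan and cookie are assumed to come from an already prepared and submitted cyclic transfer, and report_progress() is a made-up name:

```c
#include <linux/dmaengine.h>
#include <linux/printk.h>

static void report_progress(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	/*
	 * With this series, residue comes from edma_residue(), i.e. the
	 * live PaRAM position, instead of a recomputed descriptor size.
	 */
	status = dmaengine_tx_status(chan, cookie, &state);
	pr_info("status %d, residue %u bytes\n", status, state.residue);

	/* DMA_PAUSE/DMA_RESUME are only accepted for cyclic descriptors */
	dmaengine_pause(chan);
	dmaengine_resume(chan);
}
```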