author     Linus Torvalds <torvalds@linux-foundation.org>  2016-12-16 18:26:42 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-12-16 18:26:42 +0100
commit     de399813b521ea7e38bbfb5e5b620b5e202e5783
tree       ceb8302f9d6a7a4f2e25b64c5dc42c1fb80b435b /drivers/soc/fsl/qbman
parent     Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s39...
parent     Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/scottwoo...
Merge tag 'powerpc-4.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
"Highlights include:
- Support for the kexec_file_load() syscall, which is a prereq for
secure and trusted boot.
- Prevent kernel execution of userspace on P9 Radix (similar to
SMEP/PXN).
- Sort the exception tables at build time, to save time at boot, and
store them as relative offsets to save space in the kernel image &
memory.
- Allow building the kernel with thin archives, which should allow us
to build an allyesconfig once some other fixes land.
- Build fixes to allow us to correctly rebuild when changing the
kernel endian from big to little or vice versa.
- Plumbing so that we can avoid doing a full mm TLB flush on P9
Radix.
- Initial stack protector support (-fstack-protector).
- Support for dumping the radix (aka. Linux) and hash page tables via
debugfs.
- Fix an oops in cxl coredump generation when cxl_get_fd() is used.
- Freescale updates from Scott: "Highlights include 8xx hugepage
support, qbman fixes/cleanup, device tree updates, and some misc
cleanup."
- Many and varied fixes and minor enhancements as always.
Thanks to:
Alexey Kardashevskiy, Andrew Donnellan, Aneesh Kumar K.V, Anshuman
Khandual, Anton Blanchard, Balbir Singh, Bartlomiej Zolnierkiewicz,
Christophe Jaillet, Christophe Leroy, Denis Kirjanov, Elimar
Riesebieter, Frederic Barrat, Gautham R. Shenoy, Geliang Tang, Geoff
Levand, Jack Miller, Johan Hovold, Lars-Peter Clausen, Libin,
Madhavan Srinivasan, Michael Neuling, Nathan Fontenot, Naveen N.
Rao, Nicholas Piggin, Pan Xinhui, Peter Senna Tschudin, Rashmica
Gupta, Rui Teng, Russell Currey, Scott Wood, Simon Guo, Suraj
Jitindar Singh, Thiago Jung Bauermann, Tobias Klauser, Vaibhav Jain"
[ And thanks to Michael, who took time off from a new baby to get this
pull request done. - Linus ]
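
Two of the highlights are easier to picture with a short sketch. First,
kexec_file_load(): a minimal, illustrative userspace invocation of the
syscall. The kernel path, the absence of an initramfs, and the command
line below are placeholder assumptions, not part of this merge; the call
requires CAP_SYS_BOOT and a kernel built with CONFIG_KEXEC_FILE.

	/* sketch: stage a new kernel image via kexec_file_load(2) */
	#include <fcntl.h>
	#include <linux/kexec.h>	/* KEXEC_FILE_NO_INITRAMFS */
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		const char cmdline[] = "root=/dev/sda1 ro";	/* placeholder */
		int kfd = open("/boot/vmlinux", O_RDONLY);	/* placeholder path */

		if (kfd < 0) {
			perror("open");
			return 1;
		}
		/* cmdline_len must include the terminating NUL */
		if (syscall(SYS_kexec_file_load, kfd, -1, sizeof(cmdline),
			    cmdline, KEXEC_FILE_NO_INITRAMFS) < 0) {
			perror("kexec_file_load");
			return 1;
		}
		close(kfd);
		return 0;	/* a later reboot(2) with LINUX_REBOOT_CMD_KEXEC boots it */
	}

Second, the relative exception tables: each entry shrinks from two
absolute addresses to two 32-bit self-relative offsets, which is what
makes the table smaller and sortable at build time. The layout below
follows the generic kernel scheme and is a sketch, not necessarily the
exact powerpc definition:

	struct exception_table_entry {
		int insn;	/* offset from this field to the faulting insn */
		int fixup;	/* offset from this field to the fixup code */
	};

	/* recover an absolute address from a self-relative offset */
	static inline unsigned long
	ex_fixup_addr(const struct exception_table_entry *x)
	{
		return (unsigned long)&x->fixup + x->fixup;
	}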
* tag 'powerpc-4.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (174 commits)
powerpc/fsl/dts: add FMan node for t1042d4rdb
powerpc/fsl/dts: add sg_2500_aqr105_phy4 alias on t1024rdb
powerpc/fsl/dts: add QMan and BMan nodes on t1024
powerpc/fsl/dts: add QMan and BMan nodes on t1023
soc/fsl/qman: test: use DEFINE_SPINLOCK()
powerpc/fsl-lbc: use DEFINE_SPINLOCK()
powerpc/8xx: Implement support of hugepages
powerpc: get hugetlbpage handling more generic
powerpc: port 64 bits pgtable_cache to 32 bits
powerpc/boot: Request no dynamic linker for boot wrapper
soc/fsl/bman: Use resource_size instead of computation
soc/fsl/qe: use builtin_platform_driver
powerpc/fsl_pmc: use builtin_platform_driver
powerpc/83xx/suspend: use builtin_platform_driver
powerpc/ftrace: Fix the comments for ftrace_modify_code
powerpc/perf: macros for power9 format encoding
powerpc/perf: power9 raw event format encoding
powerpc/perf: update attribute_group data structure
powerpc/perf: factor out the event format field
powerpc/mm/iommu, vfio/spapr: Put pages on VFIO container shutdown
...
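
Among the commits above, the two "use DEFINE_SPINLOCK()" changes are the
same one-line idiom swap, which the qman_test_stash.c hunk in the diff
below also shows. A self-contained sketch of the pattern (the lock name
and critical section are invented for illustration):

	#include <linux/spinlock.h>

	/*
	 * Before: static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
	 * After: the dedicated macro, which expands to the same initializer
	 * but cannot mismatch the lock name passed to __SPIN_LOCK_UNLOCKED().
	 */
	static DEFINE_SPINLOCK(hp_lock);

	static void hp_lock_demo(void)
	{
		spin_lock(&hp_lock);
		/* ... critical section ... */
		spin_unlock(&hp_lock);
	}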
Diffstat (limited to 'drivers/soc/fsl/qbman')

 drivers/soc/fsl/qbman/bman.c            |   8
 drivers/soc/fsl/qbman/bman_ccsr.c       |   3
 drivers/soc/fsl/qbman/bman_portal.c     |  17
 drivers/soc/fsl/qbman/dpaa_sys.h        |   1
 drivers/soc/fsl/qbman/qman.c            | 233
 drivers/soc/fsl/qbman/qman_ccsr.c       |   3
 drivers/soc/fsl/qbman/qman_portal.c     |  41
 drivers/soc/fsl/qbman/qman_priv.h       |  17
 drivers/soc/fsl/qbman/qman_test_api.c   |  27
 drivers/soc/fsl/qbman/qman_test_stash.c |  38

 10 files changed, 201 insertions(+), 187 deletions(-)
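
One small recurring idiom before the diff itself: the bman_ccsr.c hunk
replaces open-coded "res->end - res->start + 1" with resource_size(). A
minimal, illustrative probe fragment (the device and names here are
invented, not taken from the driver):

	#include <linux/io.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	static int demo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -ENXIO;

		/*
		 * resource_size(res) == res->end - res->start + 1; the helper
		 * avoids the classic off-by-one in the open-coded form.
		 */
		base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
		if (!base)
			return -ENXIO;

		return 0;
	}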
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
index ffa48fdbb1a9..a3d6d7cfa929 100644
--- a/drivers/soc/fsl/qbman/bman.c
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -167,12 +167,12 @@ struct bm_portal {
 /* Cache-inhibited register access. */
 static inline u32 bm_in(struct bm_portal *p, u32 offset)
 {
-	return __raw_readl(p->addr.ci + offset);
+	return be32_to_cpu(__raw_readl(p->addr.ci + offset));
 }
 
 static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
 {
-	__raw_writel(val, p->addr.ci + offset);
+	__raw_writel(cpu_to_be32(val), p->addr.ci + offset);
 }
 
 /* Cache Enabled Portal Access */
@@ -188,7 +188,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
 
 static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
 {
-	return __raw_readl(p->addr.ce + offset);
+	return be32_to_cpu(__raw_readl(p->addr.ce + offset));
 }
 
 struct bman_portal {
@@ -391,7 +391,7 @@ static void bm_rcr_finish(struct bm_portal *portal)
 
 	i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
 	if (i != rcr_ptr2idx(rcr->cursor))
-		pr_crit("losing uncommited RCR entries\n");
+		pr_crit("losing uncommitted RCR entries\n");
 
 	i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
 	if (i != rcr->ci)
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index 9deb0524543f..a8e8389a6894 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -181,8 +181,7 @@ static int fsl_bman_probe(struct platform_device *pdev)
 			node->full_name);
 		return -ENXIO;
 	}
-	bm_ccsr_start = devm_ioremap(dev, res->start,
-				     res->end - res->start + 1);
+	bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
 	if (!bm_ccsr_start)
 		return -ENXIO;
 
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index 986f64690e6e..8354d4dabdad 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -126,15 +126,19 @@ static int bman_portal_probe(struct platform_device *pdev)
 	pcfg->irq = irq;
 
 	va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
-	if (!va)
+	if (!va) {
+		dev_err(dev, "ioremap::CE failed\n");
 		goto err_ioremap1;
+	}
 
 	pcfg->addr_virt[DPAA_PORTAL_CE] = va;
 
 	va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
 			  _PAGE_GUARDED | _PAGE_NO_CACHE);
-	if (!va)
+	if (!va) {
+		dev_err(dev, "ioremap::CI failed\n");
 		goto err_ioremap2;
+	}
 
 	pcfg->addr_virt[DPAA_PORTAL_CI] = va;
 
@@ -150,8 +154,10 @@ static int bman_portal_probe(struct platform_device *pdev)
 	spin_unlock(&bman_lock);
 	pcfg->cpu = cpu;
 
-	if (!init_pcfg(pcfg))
-		goto err_ioremap2;
+	if (!init_pcfg(pcfg)) {
+		dev_err(dev, "portal init failed\n");
+		goto err_portal_init;
+	}
 
 	/* clear irq affinity if assigned cpu is offline */
 	if (!cpu_online(cpu))
@@ -159,10 +165,11 @@ static int bman_portal_probe(struct platform_device *pdev)
 
 	return 0;
 
+err_portal_init:
+	iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
 err_ioremap2:
 	iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
 err_ioremap1:
-	dev_err(dev, "ioremap failed\n");
 	return -ENXIO;
 }
 
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
index b63fd72295c6..2eaf3184f61d 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.h
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -38,6 +38,7 @@
 #include <linux/kthread.h>
 #include <linux/vmalloc.h>
 #include <linux/platform_device.h>
+#include <linux/of.h>
 #include <linux/of_reserved_mem.h>
 #include <linux/prefetch.h>
 #include <linux/genalloc.h>
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 119054bc922b..6f509f68085e 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -140,10 +140,10 @@ enum qm_mr_cmode {	/* matches QCSP_CFG::MM */
 struct qm_eqcr_entry {
 	u8 _ncw_verb; /* writes to this are non-coherent */
 	u8 dca;
-	u16 seqnum;
-	u32 orp;	/* 24-bit */
-	u32 fqid;	/* 24-bit */
-	u32 tag;
+	__be16 seqnum;
+	u8 __reserved[4];
+	__be32 fqid;	/* 24-bit */
+	__be32 tag;
 	struct qm_fd fd;
 	u8 __reserved3[32];
 } __packed;
@@ -183,41 +183,22 @@ struct qm_mr {
 };
 
 /* MC (Management Command) command */
-/* "Query FQ" */
-struct qm_mcc_queryfq {
+/* "FQ" command layout */
+struct qm_mcc_fq {
 	u8 _ncw_verb;
 	u8 __reserved1[3];
-	u32 fqid;	/* 24-bit */
+	__be32 fqid;	/* 24-bit */
 	u8 __reserved2[56];
 } __packed;
 
-/* "Alter FQ State Commands " */
-struct qm_mcc_alterfq {
-	u8 _ncw_verb;
-	u8 __reserved1[3];
-	u32 fqid;	/* 24-bit */
-	u8 __reserved2;
-	u8 count;	/* number of consecutive FQID */
-	u8 __reserved3[10];
-	u32 context_b;	/* frame queue context b */
-	u8 __reserved4[40];
-} __packed;
-
-/* "Query CGR" */
-struct qm_mcc_querycgr {
+/* "CGR" command layout */
+struct qm_mcc_cgr {
 	u8 _ncw_verb;
 	u8 __reserved1[30];
 	u8 cgid;
 	u8 __reserved2[32];
 };
 
-struct qm_mcc_querywq {
-	u8 _ncw_verb;
-	u8 __reserved;
-	/* select channel if verb != QUERYWQ_DEDICATED */
-	u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
-	u8 __reserved2[60];
-} __packed;
-
 #define QM_MCC_VERB_VBIT		0x80
 #define QM_MCC_VERB_MASK		0x7f	/* where the verb contains; */
 #define QM_MCC_VERB_INITFQ_PARKED	0x40
@@ -243,12 +224,9 @@ union qm_mc_command {
 		u8 __reserved[63];
 	};
 	struct qm_mcc_initfq initfq;
-	struct qm_mcc_queryfq queryfq;
-	struct qm_mcc_alterfq alterfq;
 	struct qm_mcc_initcgr initcgr;
-	struct qm_mcc_querycgr querycgr;
-	struct qm_mcc_querywq querywq;
-	struct qm_mcc_queryfq_np queryfq_np;
+	struct qm_mcc_fq fq;
+	struct qm_mcc_cgr cgr;
 };
 
 /* MC (Management Command) result */
@@ -343,12 +321,12 @@ struct qm_portal {
 /* Cache-inhibited register access. */
 static inline u32 qm_in(struct qm_portal *p, u32 offset)
 {
-	return __raw_readl(p->addr.ci + offset);
+	return be32_to_cpu(__raw_readl(p->addr.ci + offset));
 }
 
 static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
 {
-	__raw_writel(val, p->addr.ci + offset);
+	__raw_writel(cpu_to_be32(val), p->addr.ci + offset);
 }
 
 /* Cache Enabled Portal Access */
@@ -364,7 +342,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
 
 static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
 {
-	return __raw_readl(p->addr.ce + offset);
+	return be32_to_cpu(__raw_readl(p->addr.ce + offset));
 }
 
 /* --- EQCR API --- */
@@ -443,7 +421,7 @@ static inline void qm_eqcr_finish(struct qm_portal *portal)
 
 	DPAA_ASSERT(!eqcr->busy);
 	if (pi != eqcr_ptr2idx(eqcr->cursor))
-		pr_crit("losing uncommited EQCR entries\n");
+		pr_crit("losing uncommitted EQCR entries\n");
 	if (ci != eqcr->ci)
 		pr_crit("missing existing EQCR completions\n");
 	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
@@ -492,8 +470,7 @@ static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
 static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
 {
 	DPAA_ASSERT(eqcr->busy);
-	DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
-	DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
+	DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
 	DPAA_ASSERT(eqcr->available >= 1);
 }
 
@@ -962,8 +939,6 @@ struct qman_portal {
 	u32 sdqcr;
 	/* probing time config params for cpu-affine portals */
 	const struct qm_portal_config *config;
-	/* needed for providing a non-NULL device to dma_map_***() */
-	struct platform_device *pdev;
 	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
 	struct qman_cgrs *cgrs;
 	/* linked-list of CSCN handlers. */
@@ -1133,7 +1108,6 @@ static int qman_create_portal(struct qman_portal *portal,
 			      const struct qman_cgrs *cgrs)
 {
 	struct qm_portal *p;
-	char buf[16];
 	int ret;
 	u32 isdr;
 
@@ -1196,15 +1170,6 @@ static int qman_create_portal(struct qman_portal *portal,
 	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
 			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
 			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
-	sprintf(buf, "qportal-%d", c->channel);
-	portal->pdev = platform_device_alloc(buf, -1);
-	if (!portal->pdev)
-		goto fail_devalloc;
-	if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
-		goto fail_devadd;
-	ret = platform_device_add(portal->pdev);
-	if (ret)
-		goto fail_devadd;
 	isdr = 0xffffffff;
 	qm_out(p, QM_REG_ISDR, isdr);
 	portal->irq_sources = 0;
@@ -1239,8 +1204,8 @@ static int qman_create_portal(struct qman_portal *portal,
 		/* special handling, drain just in case it's a few FQRNIs */
 		const union qm_mr_entry *e = qm_mr_current(p);
 
-		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x\n, addr 0x%x",
-			e->verb, e->ern.rc, e->ern.fd.addr_lo);
+		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
+			e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
 		goto fail_dqrr_mr_empty;
 	}
 	/* Success */
@@ -1256,10 +1221,6 @@ fail_eqcr_empty:
 fail_affinity:
 	free_irq(c->irq, portal);
 fail_irq:
-	platform_device_del(portal->pdev);
-fail_devadd:
-	platform_device_put(portal->pdev);
-fail_devalloc:
 	kfree(portal->cgrs);
 fail_cgrs:
 	qm_mc_finish(p);
@@ -1321,9 +1282,6 @@ static void qman_destroy_portal(struct qman_portal *qm)
 	qm_dqrr_finish(&qm->p);
 	qm_eqcr_finish(&qm->p);
 
-	platform_device_del(qm->pdev);
-	platform_device_put(qm->pdev);
-
 	qm->config = NULL;
 }
 
@@ -1428,7 +1386,7 @@ static void qm_mr_process_task(struct work_struct *work)
 		case QM_MR_VERB_FQRN:
 		case QM_MR_VERB_FQRL:
 			/* Lookup in the retirement table */
-			fq = fqid_to_fq(msg->fq.fqid);
+			fq = fqid_to_fq(qm_fqid_get(&msg->fq));
 			if (WARN_ON(!fq))
 				break;
 			fq_state_change(p, fq, msg, verb);
@@ -1437,7 +1395,7 @@ static void qm_mr_process_task(struct work_struct *work)
 			break;
 		case QM_MR_VERB_FQPN:
 			/* Parked */
-			fq = tag_to_fq(msg->fq.contextB);
+			fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
 			fq_state_change(p, fq, msg, verb);
 			if (fq->cb.fqs)
 				fq->cb.fqs(p, fq, msg);
@@ -1451,7 +1409,7 @@ static void qm_mr_process_task(struct work_struct *work)
 			}
 		} else {
 			/* Its a software ERN */
-			fq = tag_to_fq(msg->ern.tag);
+			fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
 			fq->cb.ern(p, fq, msg);
 		}
 		num++;
@@ -1536,7 +1494,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
 
 		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
 			/*
-			 * VDQCR: don't trust contextB as the FQ may have
+			 * VDQCR: don't trust context_b as the FQ may have
 			 * been configured for h/w consumption and we're
 			 * draining it post-retirement.
 			 */
@@ -1562,8 +1520,8 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
 			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
 				clear_vdqcr(p, fq);
 		} else {
-			/* SDQCR: contextB points to the FQ */
-			fq = tag_to_fq(dq->contextB);
+			/* SDQCR: context_b points to the FQ */
+			fq = tag_to_fq(be32_to_cpu(dq->context_b));
 			/* Now let the callback do its stuff */
 			res = fq->cb.dqrr(p, fq, dq);
 			/*
@@ -1780,9 +1738,9 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
 	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
 		return -EINVAL;
 #endif
-	if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+	if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
 		/* And can't be set at the same time as TDTHRESH */
-		if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
 			return -EINVAL;
 	}
 	/* Issue an INITFQ_[PARKED|SCHED] management command */
@@ -1796,37 +1754,49 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
 	mcc = qm_mc_start(&p->p);
 	if (opts)
 		mcc->initfq = *opts;
-	mcc->initfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
 	mcc->initfq.count = 0;
 	/*
-	 * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
	 * demux pointer. Otherwise, the caller-provided value is allowed to
	 * stand, don't overwrite it.
	 */
 	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
 		dma_addr_t phys_fq;
 
-		mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
-		mcc->initfq.fqd.context_b = fq_to_tag(fq);
+		mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
+		mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
 		/*
 		 * and the physical address - NB, if the user wasn't trying to
 		 * set CONTEXTA, clear the stashing settings.
 		 */
-		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
-			mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+		if (!(be16_to_cpu(mcc->initfq.we_mask) &
+		      QM_INITFQ_WE_CONTEXTA)) {
+			mcc->initfq.we_mask |=
+				cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
 			memset(&mcc->initfq.fqd.context_a, 0,
 			       sizeof(mcc->initfq.fqd.context_a));
 		} else {
-			phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
-						 DMA_TO_DEVICE);
+			struct qman_portal *p = qman_dma_portal;
+
+			phys_fq = dma_map_single(p->config->dev, fq,
+						 sizeof(*fq), DMA_TO_DEVICE);
+			if (dma_mapping_error(p->config->dev, phys_fq)) {
+				dev_err(p->config->dev, "dma_mapping failed\n");
+				ret = -EIO;
+				goto out;
+			}
+
 			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
 		}
 	}
 	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
 		int wq = 0;
 
-		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
-			mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+		if (!(be16_to_cpu(mcc->initfq.we_mask) &
+		      QM_INITFQ_WE_DESTWQ)) {
+			mcc->initfq.we_mask |=
+				cpu_to_be16(QM_INITFQ_WE_DESTWQ);
 			wq = 4;
 		}
 		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
@@ -1845,13 +1815,13 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
 		goto out;
 	}
 	if (opts) {
-		if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
-			if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
+			if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
 				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
 			else
 				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
 		}
-		if (opts->we_mask & QM_INITFQ_WE_CGID)
+		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
 			fq->cgr_groupid = opts->fqd.cgid;
 	}
 	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
@@ -1884,7 +1854,7 @@ int qman_schedule_fq(struct qman_fq *fq)
 		goto out;
 	}
 	mcc = qm_mc_start(&p->p);
-	mcc->alterfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
 	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
@@ -1927,7 +1897,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
 		goto out;
 	}
 	mcc = qm_mc_start(&p->p);
-	mcc->alterfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
 	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
@@ -1970,8 +1940,8 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
 
 			msg.verb = QM_MR_VERB_FQRNI;
 			msg.fq.fqs = mcr->alterfq.fqs;
-			msg.fq.fqid = fq->fqid;
-			msg.fq.contextB = fq_to_tag(fq);
+			qm_fqid_set(&msg.fq, fq->fqid);
+			msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
 			fq->cb.fqs(p, fq, &msg);
 		}
 	} else if (res == QM_MCR_RESULT_PENDING) {
@@ -2006,7 +1976,7 @@ int qman_oos_fq(struct qman_fq *fq)
 		goto out;
 	}
 	mcc = qm_mc_start(&p->p);
-	mcc->alterfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
 	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		ret = -ETIMEDOUT;
@@ -2032,7 +2002,7 @@ int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
 	int ret = 0;
 
 	mcc = qm_mc_start(&p->p);
-	mcc->queryfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		ret = -ETIMEDOUT;
@@ -2058,7 +2028,7 @@ static int qman_query_fq_np(struct qman_fq *fq,
 	int ret = 0;
 
 	mcc = qm_mc_start(&p->p);
-	mcc->queryfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		ret = -ETIMEDOUT;
@@ -2086,7 +2056,7 @@ static int qman_query_cgr(struct qman_cgr *cgr,
 	int ret = 0;
 
 	mcc = qm_mc_start(&p->p);
-	mcc->querycgr.cgid = cgr->cgrid;
+	mcc->cgr.cgid = cgr->cgrid;
 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		ret = -ETIMEDOUT;
@@ -2239,8 +2209,8 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
 	if (unlikely(!eq))
 		goto out;
 
-	eq->fqid = fq->fqid;
-	eq->tag = fq_to_tag(fq);
+	qm_fqid_set(eq, fq->fqid);
+	eq->tag = cpu_to_be32(fq_to_tag(fq));
 	eq->fd = *fd;
 
 	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
@@ -2282,7 +2252,24 @@ out:
 }
 
 #define PORTAL_IDX(n)	(n->config->channel - QM_CHANNEL_SWPORTAL0)
-#define TARG_MASK(n)	(BIT(31) >> PORTAL_IDX(n))
+
+/* congestion state change notification target update control */
+static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+	if (qman_ip_rev >= QMAN_REV30)
+		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
+					QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
+	else
+		cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
+}
+
+static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+	if (qman_ip_rev >= QMAN_REV30)
+		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
+	else
+		cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
+}
 
 static u8 qman_cgr_cpus[CGR_NUM];
 
@@ -2305,7 +2292,6 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
 		    struct qm_mcc_initcgr *opts)
 {
 	struct qm_mcr_querycgr cgr_state;
-	struct qm_mcc_initcgr local_opts = {};
 	int ret;
 	struct qman_portal *p;
 
@@ -2327,22 +2313,18 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
 	spin_lock(&p->cgr_lock);
 
 	if (opts) {
+		struct qm_mcc_initcgr local_opts = *opts;
+
 		ret = qman_query_cgr(cgr, &cgr_state);
 		if (ret)
 			goto out;
-		if (opts)
-			local_opts = *opts;
-		if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
-			local_opts.cgr.cscn_targ_upd_ctrl =
-				QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
-		else
-			/* Overwrite TARG */
-			local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
-						   TARG_MASK(p);
-		local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+		qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
+				     be32_to_cpu(cgr_state.cgr.cscn_targ));
+		local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
 
 		/* send init if flags indicate so */
-		if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+		if (flags & QMAN_CGR_FLAG_USE_INIT)
 			ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
 					    &local_opts);
 		else
@@ -2405,13 +2387,11 @@ int qman_delete_cgr(struct qman_cgr *cgr)
 		list_add(&cgr->node, &p->cgr_cbs);
 		goto release_lock;
 	}
-	/* Overwrite TARG */
-	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
-	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
-		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
-	else
-		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
-					   ~(TARG_MASK(p));
+
+	local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+	qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
+			       be32_to_cpu(cgr_state.cgr.cscn_targ));
+
 	ret = qm_modify_cgr(cgr, 0, &local_opts);
 	if (ret)
 		/* add back to the list */
@@ -2501,7 +2481,7 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
 	} while (wait && !dqrr);
 
 	while (dqrr) {
-		if (dqrr->fqid == fqid && (dqrr->stat & s))
+		if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
 			found = 1;
 		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
 		qm_dqrr_pvb_update(p);
@@ -2537,7 +2517,7 @@ static int qman_shutdown_fq(u32 fqid)
 	dev = p->config->dev;
 	/* Determine the state of the FQID */
 	mcc = qm_mc_start(&p->p);
-	mcc->queryfq_np.fqid = fqid;
+	qm_fqid_set(&mcc->fq, fqid);
 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		dev_err(dev, "QUERYFQ_NP timeout\n");
@@ -2552,7 +2532,7 @@ static int qman_shutdown_fq(u32 fqid)
 
 	/* Query which channel the FQ is using */
 	mcc = qm_mc_start(&p->p);
-	mcc->queryfq.fqid = fqid;
+	qm_fqid_set(&mcc->fq, fqid);
 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		dev_err(dev, "QUERYFQ timeout\n");
@@ -2572,7 +2552,7 @@ static int qman_shutdown_fq(u32 fqid)
 	case QM_MCR_NP_STATE_PARKED:
 		orl_empty = 0;
 		mcc = qm_mc_start(&p->p);
-		mcc->alterfq.fqid = fqid;
+		qm_fqid_set(&mcc->fq, fqid);
 		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
 		if (!qm_mc_result_timeout(&p->p, &mcr)) {
 			dev_err(dev, "QUERYFQ_NP timeout\n");
@@ -2667,7 +2647,7 @@ static int qman_shutdown_fq(u32 fqid)
 				cpu_relax();
 		}
 		mcc = qm_mc_start(&p->p);
-		mcc->alterfq.fqid = fqid;
+		qm_fqid_set(&mcc->fq, fqid);
 		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
 		if (!qm_mc_result_timeout(&p->p, &mcr)) {
 			ret = -ETIMEDOUT;
@@ -2687,7 +2667,7 @@ static int qman_shutdown_fq(u32 fqid)
 	case QM_MCR_NP_STATE_RETIRED:
 		/* Send OOS Command */
 		mcc = qm_mc_start(&p->p);
-		mcc->alterfq.fqid = fqid;
+		qm_fqid_set(&mcc->fq, fqid);
 		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
 		if (!qm_mc_result_timeout(&p->p, &mcr)) {
 			ret = -ETIMEDOUT;
@@ -2722,6 +2702,7 @@ const struct qm_portal_config *qman_get_qm_portal_config(
 {
 	return portal->config;
 }
+EXPORT_SYMBOL(qman_get_qm_portal_config);
 
 struct gen_pool *qm_fqalloc; /* FQID allocator */
 struct gen_pool *qm_qpalloc; /* pool-channel allocator */
@@ -2789,15 +2770,18 @@ static int qpool_cleanup(u32 qp)
 		struct qm_mcr_queryfq_np np;
 
 		err = qman_query_fq_np(&fq, &np);
-		if (err)
+		if (err == -ERANGE)
 			/* FQID range exceeded, found no problems */
 			return 0;
+		else if (WARN_ON(err))
+			return err;
+
 		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
 			struct qm_fqd fqd;
 
 			err = qman_query_fq(&fq, &fqd);
 			if (WARN_ON(err))
-				return 0;
+				return err;
 			if (qm_fqd_get_chan(&fqd) == qp) {
 				/* The channel is the FQ's target, clean it */
 				err = qman_shutdown_fq(fq.fqid);
@@ -2836,7 +2820,7 @@ static int cgr_cleanup(u32 cgrid)
 	 * error, looking for non-OOS FQDs whose CGR is the CGR being released
 	 */
 	struct qman_fq fq = {
-		.fqid = 1
+		.fqid = QM_FQID_RANGE_START
 	};
 	int err;
 
@@ -2844,16 +2828,19 @@ static int cgr_cleanup(u32 cgrid)
 		struct qm_mcr_queryfq_np np;
 
 		err = qman_query_fq_np(&fq, &np);
-		if (err)
+		if (err == -ERANGE)
 			/* FQID range exceeded, found no problems */
 			return 0;
+		else if (WARN_ON(err))
+			return err;
+
 		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
 			struct qm_fqd fqd;
 
 			err = qman_query_fq(&fq, &fqd);
 			if (WARN_ON(err))
-				return 0;
-			if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+				return err;
+			if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
 			    fqd.cgid == cgrid) {
 				pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
 				       cgrid, fq.fqid);
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 0cace9e0077e..f4e6e70de259 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -444,6 +444,9 @@ static int zero_priv_mem(struct device *dev, struct device_node *node,
 	/* map as cacheable, non-guarded */
 	void __iomem *tmpp = ioremap_prot(addr, sz, 0);
 
+	if (!tmpp)
+		return -ENOMEM;
+
 	memset_io(tmpp, 0, sz);
 	flush_dcache_range((unsigned long)tmpp,
 			   (unsigned long)tmpp + sz);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index d068e4820f49..adbaa30d3c5a 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -30,6 +30,9 @@
 
 #include "qman_priv.h"
 
+struct qman_portal *qman_dma_portal;
+EXPORT_SYMBOL(qman_dma_portal);
+
 /* Enable portal interupts (as opposed to polling mode) */
 #define CONFIG_FSL_DPA_PIRQ_SLOW  1
 #define CONFIG_FSL_DPA_PIRQ_FAST  1
@@ -150,6 +153,10 @@ static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
 		/* all assigned portals are initialized now */
 		qman_init_cgr_all();
 	}
+
+	if (!qman_dma_portal)
+		qman_dma_portal = p;
+
 	spin_unlock(&qman_lock);
 
 	dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
@@ -217,9 +224,9 @@ static int qman_portal_probe(struct platform_device *pdev)
 	struct device_node *node = dev->of_node;
 	struct qm_portal_config *pcfg;
 	struct resource *addr_phys[2];
-	const u32 *channel;
 	void __iomem *va;
-	int irq, len, cpu;
+	int irq, cpu, err;
+	u32 val;
 
 	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
 	if (!pcfg)
@@ -243,13 +250,13 @@ static int qman_portal_probe(struct platform_device *pdev)
 		return -ENXIO;
 	}
 
-	channel = of_get_property(node, "cell-index", &len);
-	if (!channel || (len != 4)) {
+	err = of_property_read_u32(node, "cell-index", &val);
+	if (err) {
 		dev_err(dev, "Can't get %s property 'cell-index'\n",
 			node->full_name);
-		return -ENXIO;
+		return err;
 	}
-	pcfg->channel = *channel;
+	pcfg->channel = val;
 	pcfg->cpu = -1;
 	irq = platform_get_irq(pdev, 0);
 	if (irq <= 0) {
@@ -259,15 +266,19 @@ static int qman_portal_probe(struct platform_device *pdev)
 	pcfg->irq = irq;
 
 	va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
-	if (!va)
+	if (!va) {
+		dev_err(dev, "ioremap::CE failed\n");
 		goto err_ioremap1;
+	}
 
 	pcfg->addr_virt[DPAA_PORTAL_CE] = va;
 
 	va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
 			  _PAGE_GUARDED | _PAGE_NO_CACHE);
-	if (!va)
+	if (!va) {
+		dev_err(dev, "ioremap::CI failed\n");
 		goto err_ioremap2;
+	}
 
 	pcfg->addr_virt[DPAA_PORTAL_CI] = va;
 
@@ -285,8 +296,15 @@ static int qman_portal_probe(struct platform_device *pdev)
 	spin_unlock(&qman_lock);
 	pcfg->cpu = cpu;
 
-	if (!init_pcfg(pcfg))
-		goto err_ioremap2;
+	if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
+		dev_err(dev, "dma_set_mask() failed\n");
+		goto err_portal_init;
+	}
+
+	if (!init_pcfg(pcfg)) {
+		dev_err(dev, "portal init failed\n");
+		goto err_portal_init;
+	}
 
 	/* clear irq affinity if assigned cpu is offline */
 	if (!cpu_online(cpu))
@@ -294,10 +312,11 @@ static int qman_portal_probe(struct platform_device *pdev)
 
 	return 0;
 
+err_portal_init:
+	iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
 err_ioremap2:
 	iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
 err_ioremap1:
-	dev_err(dev, "ioremap failed\n");
 	return -ENXIO;
 }
 
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 5cf821e623a9..53685b59718e 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -73,29 +73,23 @@ struct qm_mcr_querycgr {
 	struct __qm_mc_cgr cgr; /* CGR fields */
 	u8 __reserved2[6];
 	u8 i_bcnt_hi;	/* high 8-bits of 40-bit "Instant" */
-	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
+	__be32 i_bcnt_lo;	/* low 32-bits of 40-bit */
 	u8 __reserved3[3];
 	u8 a_bcnt_hi;	/* high 8-bits of 40-bit "Average" */
-	u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
-	u32 cscn_targ_swp[4];
+	__be32 a_bcnt_lo;	/* low 32-bits of 40-bit */
+	__be32 cscn_targ_swp[4];
 } __packed;
 
 static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
 {
-	return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
+	return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
 }
 static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
 {
-	return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
+	return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
 }
 
 /* "Query FQ Non-Programmable Fields" */
-struct qm_mcc_queryfq_np {
-	u8 _ncw_verb;
-	u8 __reserved1[3];
-	u32 fqid;	/* 24-bit */
-	u8 __reserved2[56];
-} __packed;
 
 struct qm_mcr_queryfq_np {
 	u8 verb;
@@ -367,5 +361,6 @@ int qman_alloc_fq_table(u32 num_fqids);
 #define QM_PIRQ_VISIBLE	(QM_PIRQ_SLOW | QM_PIRQ_DQRI)
 
 extern struct qman_portal *affine_portals[NR_CPUS];
+extern struct qman_portal *qman_dma_portal;
 const struct qm_portal_config *qman_get_qm_portal_config(
 					struct qman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
index 6880ff17f45e..2895d062cf51 100644
--- a/drivers/soc/fsl/qbman/qman_test_api.c
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -65,7 +65,7 @@ static void fd_init(struct qm_fd *fd)
 {
 	qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
 	qm_fd_set_contig_big(fd, 0x0000ffff);
-	fd->cmd = 0xfeedf00d;
+	fd->cmd = cpu_to_be32(0xfeedf00d);
 }
 
 static void fd_inc(struct qm_fd *fd)
@@ -86,26 +86,19 @@ static void fd_inc(struct qm_fd *fd)
 	len--;
 	qm_fd_set_param(fd, fmt, off, len);
 
-	fd->cmd++;
+	fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1);
 }
 
 /* The only part of the 'fd' we can't memcmp() is the ppid */
-static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b)
 {
-	int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
+	bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b);
 
-	if (!r) {
-		enum qm_fd_format fmt_a, fmt_b;
+	neq |= qm_fd_get_format(a) != qm_fd_get_format(b);
+	neq |= a->cfg != b->cfg;
+	neq |= a->cmd != b->cmd;
 
-		fmt_a = qm_fd_get_format(a);
-		fmt_b = qm_fd_get_format(b);
-		r = fmt_a - fmt_b;
-	}
-	if (!r)
-		r = a->cfg - b->cfg;
-	if (!r)
-		r = a->cmd - b->cmd;
-	return r;
+	return neq;
 }
 
 /* test */
@@ -217,12 +210,12 @@ static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
 					struct qman_fq *fq,
 					const struct qm_dqrr_entry *dq)
 {
-	if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) {
+	if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
 		pr_err("BADNESS: dequeued frame doesn't match;\n");
 		return qman_cb_dqrr_consume;
 	}
 	fd_inc(&fd_dq);
-	if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+	if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) {
 		sdqcr_complete = 1;
 		wake_up(&waitqueue);
 	}
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index 43cf66ba42f5..e87b65403b67 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -175,7 +175,7 @@ static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
 
 /* links together the hp_cpu structs, in first-come first-serve order. */
 static LIST_HEAD(hp_cpu_list);
-static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+static DEFINE_SPINLOCK(hp_lock);
 
 static unsigned int hp_cpu_list_length;
 
@@ -191,6 +191,9 @@ static void *__frame_ptr;
 static u32 *frame_ptr;
 static dma_addr_t frame_dma;
 
+/* needed for dma_map*() */
+static const struct qm_portal_config *pcfg;
+
 /* the main function waits on this */
 static DECLARE_WAIT_QUEUE_HEAD(queue);
 
@@ -210,16 +213,14 @@ static int allocate_frame_data(void)
 {
 	u32 lfsr = HP_FIRST_WORD;
 	int loop;
-	struct platform_device *pdev = platform_device_alloc("foobar", -1);
 
-	if (!pdev) {
-		pr_crit("platform_device_alloc() failed");
-		return -EIO;
-	}
-	if (platform_device_add(pdev)) {
-		pr_crit("platform_device_add() failed");
+	if (!qman_dma_portal) {
+		pr_crit("portal not available\n");
 		return -EIO;
 	}
+
+	pcfg = qman_get_qm_portal_config(qman_dma_portal);
+
 	__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
 	if (!__frame_ptr)
 		return -ENOMEM;
@@ -229,15 +230,22 @@ static int allocate_frame_data(void)
 		frame_ptr[loop] = lfsr;
 		lfsr = do_lfsr(lfsr);
 	}
-	frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+
+	frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
 				   DMA_BIDIRECTIONAL);
-	platform_device_del(pdev);
-	platform_device_put(pdev);
+	if (dma_mapping_error(pcfg->dev, frame_dma)) {
+		pr_crit("dma mapping failure\n");
+		kfree(__frame_ptr);
+		return -EIO;
+	}
+
 	return 0;
 }
 
 static void deallocate_frame_data(void)
 {
+	dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
+			 DMA_BIDIRECTIONAL);
 	kfree(__frame_ptr);
 }
 
@@ -249,7 +257,8 @@ static inline int process_frame_data(struct hp_handler *handler,
 	int loop;
 
 	if (qm_fd_addr_get64(fd) != handler->addr) {
-		pr_crit("bad frame address");
+		pr_crit("bad frame address, [%llX != %llX]\n",
+			qm_fd_addr_get64(fd), handler->addr);
 		return -EIO;
 	}
 	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
@@ -397,8 +406,9 @@ static int init_handler(void *h)
 		goto failed;
 	}
 	memset(&opts, 0, sizeof(opts));
-	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
-	opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
+				   QM_INITFQ_WE_CONTEXTA);
+	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
 	qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
 	err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
 			   QMAN_INITFQ_FLAG_LOCAL, &opts);
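
The broadest theme in the diff above is endian-safety: QMan/BMan portal
registers and descriptor fields are big-endian, so u16/u32 fields become
__be16/__be32 and the __raw_readl()/__raw_writel() accessors gain
explicit byte-swaps. A stripped-down sketch of that accessor pattern
(the portal struct here is invented for illustration; bm_in()/bm_out()
and qm_in()/qm_out() above are the real instances):

	#include <linux/io.h>

	struct demo_portal {
		void __iomem *ci;	/* cache-inhibited register region */
	};

	/*
	 * __raw_readl()/__raw_writel() perform no byte-swapping themselves,
	 * so the conversion is explicit and the helpers exchange CPU-endian
	 * values with their callers on both big- and little-endian hosts.
	 */
	static inline u32 demo_in(struct demo_portal *p, u32 offset)
	{
		return be32_to_cpu(__raw_readl(p->ci + offset));
	}

	static inline void demo_out(struct demo_portal *p, u32 offset, u32 val)
	{
		__raw_writel(cpu_to_be32(val), p->ci + offset);
	}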