diff options
Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c')
-rw-r--r-- | drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c | 55 |
1 files changed, 51 insertions, 4 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index c0e165dfc403..6e7c7f459f74 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -52,8 +52,8 @@ static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
 	return 0;
 }
 
-static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
-			       struct npa_aq_enq_rsp *rsp)
+int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
+			struct npa_aq_enq_rsp *rsp)
 {
 	struct rvu_hwinfo *hw = rvu->hw;
 	u16 pcifunc = req->hdr.pcifunc;
@@ -241,12 +241,50 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
 	return err;
 }
 
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
+{
+	struct npa_aq_enq_req lock_ctx_req;
+	int err;
+
+	if (req->op != NPA_AQ_INSTOP_INIT)
+		return 0;
+
+	memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
+	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+	lock_ctx_req.ctype = req->ctype;
+	lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
+	lock_ctx_req.aura_id = req->aura_id;
+	err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+	if (err)
+		dev_err(rvu->dev,
+			"PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
+			req->hdr.pcifunc,
+			(req->ctype == NPA_AQ_CTYPE_AURA) ?
+			"Aura" : "Pool", req->aura_id);
+	return err;
+}
+
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
+				struct npa_aq_enq_req *req,
+				struct npa_aq_enq_rsp *rsp)
+{
+	int err;
+
+	err = rvu_npa_aq_enq_inst(rvu, req, rsp);
+	if (!err)
+		err = npa_lf_hwctx_lockdown(rvu, req);
+	return err;
+}
+#else
+
 int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
 				struct npa_aq_enq_req *req,
 				struct npa_aq_enq_rsp *rsp)
 {
 	return rvu_npa_aq_enq_inst(rvu, req, rsp);
 }
+#endif
 
 int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
 				       struct hwctx_disable_req *req,
@@ -289,6 +327,9 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
 	    req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
 		return NPA_AF_ERR_PARAM;
 
+	if (req->way_mask)
+		req->way_mask &= 0xFFFF;
+
 	pfvf = rvu_get_pfvf(rvu, pcifunc);
 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
 	if (!pfvf->npalf || blkaddr < 0)
@@ -345,7 +386,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
 	/* Clear way partition mask and set aura offset to '0' */
 	cfg &= ~(BIT_ULL(34) - 1);
 	/* Set aura size & enable caching of contexts */
-	cfg |= (req->aura_sz << 16) | BIT_ULL(34);
+	cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;
+
 	rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);
 
 	/* Configure aura HW context's base */
@@ -353,7 +395,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
 		    (u64)pfvf->aura_ctx->iova);
 
 	/* Enable caching of qints hw context */
-	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36));
+	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
+		    BIT_ULL(36) | req->way_mask << 20);
 	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
 		    (u64)pfvf->npa_qints_ctx->iova);
 
@@ -422,6 +465,10 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
 	/* Do not bypass NDC cache */
 	cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
 	cfg &= ~0x03DULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+	/* Disable caching of stack pages */
+	cfg |= 0x10ULL;
+#endif
 	rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
 
 	/* Result structure can be followed by Aura/Pool context at
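
Note: the CONFIG_NDC_DIS_DYNAMIC_CACHING branch in the patch wraps the normal AQ enqueue so that, after an aura or pool context is initialized (NPA_AQ_INSTOP_INIT), a follow-up NPA_AQ_INSTOP_LOCK instruction pins that context in the NDC. Below is a minimal standalone sketch of that control flow only; the struct layout and the aq_enqueue()/hwctx_lockdown() helpers are illustrative stand-ins, not the kernel definitions.

#include <stdio.h>
#include <string.h>

/* Stand-in enums/structs mirroring the names used in the patch;
 * the layout here is illustrative only. */
enum { NPA_AQ_INSTOP_INIT, NPA_AQ_INSTOP_LOCK };
enum { NPA_AQ_CTYPE_AURA, NPA_AQ_CTYPE_POOL };

struct npa_aq_enq_req {
	int pcifunc;
	int ctype;
	int op;
	int aura_id;
};

/* Stand-in for rvu_npa_aq_enq_inst(): pretend the enqueue succeeds. */
static int aq_enqueue(const struct npa_aq_enq_req *req)
{
	printf("enqueue op=%d ctype=%d aura=%d\n",
	       req->op, req->ctype, req->aura_id);
	return 0;
}

/* Mirrors npa_lf_hwctx_lockdown(): after a successful INIT, issue a
 * follow-up LOCK for the same aura/pool so it stays resident in the NDC. */
static int hwctx_lockdown(const struct npa_aq_enq_req *req)
{
	struct npa_aq_enq_req lock_req;

	if (req->op != NPA_AQ_INSTOP_INIT)
		return 0;

	memset(&lock_req, 0, sizeof(lock_req));
	lock_req.pcifunc = req->pcifunc;
	lock_req.ctype = req->ctype;
	lock_req.op = NPA_AQ_INSTOP_LOCK;
	lock_req.aura_id = req->aura_id;
	return aq_enqueue(&lock_req);
}

int main(void)
{
	struct npa_aq_enq_req req = {
		.pcifunc = 0x400, .ctype = NPA_AQ_CTYPE_AURA,
		.op = NPA_AQ_INSTOP_INIT, .aura_id = 5,
	};
	int err = aq_enqueue(&req);		/* normal enqueue path */

	if (!err)
		err = hwctx_lockdown(&req);	/* extra LOCK when lockdown is enabled */
	return err;
}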
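
Note: req->way_mask is new in this patch; it is clamped to 16 bits and folded into the low bits of NPA_AF_LFX_AURAS_CFG and into the field starting at bit 20 of NPA_AF_LFX_QINTS_CFG. A minimal standalone sketch of that bit arithmetic, assuming arbitrary example values and a local BIT_ULL() stand-in:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's BIT_ULL() macro. */
#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
	/* Hypothetical request values: aura_sz would be one of the
	 * NPA_AURA_SZ_* encodings, way_mask selects NDC cache ways. */
	uint64_t aura_sz = 7;
	uint16_t way_mask = 0x000F & 0xFFFF;	/* clamped as in the patch */

	/* NPA_AF_LFX_AURAS_CFG: way mask in the low bits, aura size at
	 * bit 16, context caching enable at bit 34 -- mirrors
	 * "cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask". */
	uint64_t auras_cfg = (aura_sz << 16) | BIT_ULL(34) | way_mask;

	/* NPA_AF_LFX_QINTS_CFG: caching enable at bit 36, way mask shifted
	 * to bit 20, as in the patched rvu_write64() call. */
	uint64_t qints_cfg = BIT_ULL(36) | ((uint64_t)way_mask << 20);

	printf("AURAS_CFG = 0x%016llx\n", (unsigned long long)auras_cfg);
	printf("QINTS_CFG = 0x%016llx\n", (unsigned long long)qints_cfg);
	return 0;
}

The 0xFFFF clamp in the patch suggests a 16-bit way-mask field; values outside that range are simply truncated rather than rejected.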