author      Sunil Goutham <sgoutham@marvell.com>      2019-11-14 08:26:32 +0300
committer   David S. Miller <davem@davemloft.net>     2019-11-15 05:09:16 +0300
commit      a02917663112b3569fd01dd90c2502802c5ed3cf
tree        82d986dbe37d5767a2dd8df75a51187e307655a0  /drivers/net/ethernet/marvell
parent      ee1e75915f4ff00e125adf98c9684c57cae4a88f
octeontx2-af: Add option to disable dynamic entry caching in NDC
A config option is added to disable caching of dynamic entries
like SQEs and stack pages. It also locks down all HW contexts in NDC,
preventing them from being evicted.
This option is useful when the queue count is large and there are
huge NDC cache misses. It is a trade-off between SQ context misses and
caching of dynamically changing entries like SQEs and stack page pointers.
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--   drivers/net/ethernet/marvell/octeontx2/Kconfig            9
-rw-r--r--   drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c      64
-rw-r--r--   drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c      42
3 files changed, 112 insertions, 3 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 711ada7139d3..fb34fbd62088 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -16,3 +16,12 @@ config OCTEONTX2_AF
 	  Unit's admin function manager which manages all RVU HW resources
 	  and provides a medium to other PF/VFs to configure HW. Should be
 	  enabled for other RVU device drivers to work.
+
+config NDC_DIS_DYNAMIC_CACHING
+	bool "Disable caching of dynamic entries in NDC"
+	depends on OCTEONTX2_AF
+	default n
+	---help---
+	  This config option disables caching of dynamic entries such as NIX SQEs
+	  , NPA stack pages etc in NDC. Also locks down NIX SQ/CQ/RQ/RSS and
+	  NPA Aura/Pool contexts.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 86042a74f6dd..63190b89f709 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -661,6 +661,21 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
 	return 0;
 }
 
+static const char *nix_get_ctx_name(int ctype)
+{
+	switch (ctype) {
+	case NIX_AQ_CTYPE_CQ:
+		return "CQ";
+	case NIX_AQ_CTYPE_SQ:
+		return "SQ";
+	case NIX_AQ_CTYPE_RQ:
+		return "RQ";
+	case NIX_AQ_CTYPE_RSS:
+		return "RSS";
+	}
+	return "";
+}
+
 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
 {
 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
@@ -705,21 +720,60 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
 		if (rc) {
 			err = rc;
 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
-				(req->ctype == NIX_AQ_CTYPE_CQ) ?
-				"CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
-				"RQ" : "SQ"), qidx);
+				nix_get_ctx_name(req->ctype), qidx);
 		}
 	}
 
 	return err;
 }
 
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
+{
+	struct nix_aq_enq_req lock_ctx_req;
+	int err;
+
+	if (req->op != NIX_AQ_INSTOP_INIT)
+		return 0;
+
+	if (req->ctype == NIX_AQ_CTYPE_MCE ||
+	    req->ctype == NIX_AQ_CTYPE_DYNO)
+		return 0;
+
+	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
+	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+	lock_ctx_req.ctype = req->ctype;
+	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
+	lock_ctx_req.qidx = req->qidx;
+	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+	if (err)
+		dev_err(rvu->dev,
+			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
+			req->hdr.pcifunc,
+			nix_get_ctx_name(req->ctype), req->qidx);
+	return err;
+}
+
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
+				struct nix_aq_enq_req *req,
+				struct nix_aq_enq_rsp *rsp)
+{
+	int err;
+
+	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
+	if (!err)
+		err = nix_lf_hwctx_lockdown(rvu, req);
+	return err;
+}
+#else
+
 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
 				struct nix_aq_enq_req *req,
 				struct nix_aq_enq_rsp *rsp)
 {
 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
 }
+#endif
 
 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
 				       struct hwctx_disable_req *req,
@@ -2871,6 +2925,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
 	/* Do not bypass NDC cache */
 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
 	cfg &= ~0x3FFEULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+	/* Disable caching of SQB aka SQEs */
+	cfg |= 0x04ULL;
+#endif
 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
 
 	/* Result structure can be followed by RQ/SQ/CQ context at
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index a8f9376f6a0b..6e7c7f459f74 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -241,12 +241,50 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
 	return err;
 }
 
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
+{
+	struct npa_aq_enq_req lock_ctx_req;
+	int err;
+
+	if (req->op != NPA_AQ_INSTOP_INIT)
+		return 0;
+
+	memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
+	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+	lock_ctx_req.ctype = req->ctype;
+	lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
+	lock_ctx_req.aura_id = req->aura_id;
+	err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+	if (err)
+		dev_err(rvu->dev,
+			"PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
+			req->hdr.pcifunc,
+			(req->ctype == NPA_AQ_CTYPE_AURA) ?
+			"Aura" : "Pool", req->aura_id);
+	return err;
+}
+
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
+				struct npa_aq_enq_req *req,
+				struct npa_aq_enq_rsp *rsp)
+{
+	int err;
+
+	err = rvu_npa_aq_enq_inst(rvu, req, rsp);
+	if (!err)
+		err = npa_lf_hwctx_lockdown(rvu, req);
+	return err;
+}
+#else
+
 int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
 				struct npa_aq_enq_req *req,
 				struct npa_aq_enq_rsp *rsp)
 {
 	return rvu_npa_aq_enq_inst(rvu, req, rsp);
 }
+#endif
 
 int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
 				       struct hwctx_disable_req *req,
@@ -427,6 +465,10 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
 	/* Do not bypass NDC cache */
 	cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
 	cfg &= ~0x03DULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+	/* Disable caching of stack pages */
+	cfg |= 0x10ULL;
+#endif
 	rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
 
 	/* Result structure can be followed by Aura/Pool context at