author      Michael Chan <michael.chan@broadcom.com>    2024-02-21 02:03:08 +0300
committer   Paolo Abeni <pabeni@redhat.com>             2024-02-22 17:31:22 +0300
commit      257bbf45af81bac9b0e38be530a554e2cff92700 (patch)
tree        b94c7928cd903317d4e0c2d0fde76e48deac0af8    /drivers/net/ethernet/broadcom/bnxt/bnxt.c
parent      e7b83f2fa445290f4ce906367ccfc8d184a98249 (diff)
bnxt_en: Refactor ring reservation functions
The current functions to reserve hardware rings pass in counts for 6 different ring or resource types as parameters. Add a structure bnxt_hw_rings to consolidate all these parameters and pass a pointer to this structure to these functions instead. Also add 2 related helper functions. This makes the code cleaner and makes it easier to add new resources to be reserved.

Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt.c')
-rw-r--r--    drivers/net/ethernet/broadcom/bnxt/bnxt.c    253
1 file changed, 123 insertions(+), 130 deletions(-)
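For reference, below is a minimal standalone C sketch of the consolidation pattern this patch applies: replacing six loose count parameters with a single struct pointer. The struct bnxt_hw_rings itself is defined in bnxt.h and is not part of this file's diff; the field names (tx, rx, grp, cp, stat, vnic) are taken from the hwr-> accesses in the hunks, while the field types and the demo functions reserve_rings_old()/reserve_rings_new() are assumptions for illustration only, not driver code.

#include <stdio.h>

/* Assumed shape of the consolidated structure; the real definition
 * lives in bnxt.h and may use different types or additional fields.
 */
struct bnxt_hw_rings {
	int tx;     /* TX rings */
	int rx;     /* RX rings */
	int grp;    /* HW ring groups */
	int cp;     /* completion rings / MSI-X */
	int stat;   /* statistics contexts */
	int vnic;   /* VNICs */
};

/* Before: six separate parameters, in the style of the old
 * __bnxt_hwrm_reserve_*_rings() prototypes.
 */
static void reserve_rings_old(int tx, int rx, int grp, int cp,
			      int stat, int vnic)
{
	printf("old: tx=%d rx=%d grp=%d cp=%d stat=%d vnic=%d\n",
	       tx, rx, grp, cp, stat, vnic);
}

/* After: one pointer, so adding a new resource only touches the struct
 * and the functions that actually use the new field.
 */
static void reserve_rings_new(const struct bnxt_hw_rings *hwr)
{
	printf("new: tx=%d rx=%d grp=%d cp=%d stat=%d vnic=%d\n",
	       hwr->tx, hwr->rx, hwr->grp, hwr->cp, hwr->stat, hwr->vnic);
}

int main(void)
{
	struct bnxt_hw_rings hwr = { .tx = 8, .rx = 8, .grp = 8,
				     .cp = 16, .stat = 16, .vnic = 1 };

	reserve_rings_old(hwr.tx, hwr.rx, hwr.grp, hwr.cp, hwr.stat, hwr.vnic);
	reserve_rings_new(&hwr);
	return 0;
}

The same shift is visible in the diff: callers such as __bnxt_reserve_rings() and bnxt_check_rings() now build a bnxt_hw_rings on the stack and hand it to bnxt_hwrm_reserve_rings()/bnxt_hwrm_check_rings(), and the two new helpers bnxt_copy_reserved_rings() and bnxt_rings_ok() operate on the struct directly.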
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 6f415425dc14..4a30eff0791b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -7098,8 +7098,7 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
static bool bnxt_rfs_supported(struct bnxt *bp);
static struct hwrm_func_cfg_input *
-__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
u32 enables = 0;
@@ -7108,52 +7107,51 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return NULL;
req->fid = cpu_to_le16(0xffff);
- enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- req->num_tx_rings = cpu_to_le16(tx_rings);
+ enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
if (BNXT_NEW_RM(bp)) {
- enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
- enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
- enables |= tx_rings + ring_grps ?
+ enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= hwr->tx + hwr->grp ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= rx_rings ?
- FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ enables |= hwr->rx ?
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
} else {
- enables |= cp_rings ?
+ enables |= hwr->cp ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= ring_grps ?
+ enables |= hwr->grp ?
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
}
- enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
- req->num_rx_rings = cpu_to_le16(rx_rings);
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
+ u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
- req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_msix = cpu_to_le16(cp_rings);
+ req->num_cmpl_rings = cpu_to_le16(hwr->tx + hwr->grp);
+ req->num_msix = cpu_to_le16(hwr->cp);
req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
} else {
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp);
+ req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
req->num_rsscos_ctxs = cpu_to_le16(1);
if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
bnxt_rfs_supported(bp))
req->num_rsscos_ctxs =
- cpu_to_le16(ring_grps + 1);
+ cpu_to_le16(hwr->grp + 1);
}
- req->num_stat_ctxs = cpu_to_le16(stats);
- req->num_vnics = cpu_to_le16(vnics);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
}
req->enables = cpu_to_le32(enables);
return req;
}
static struct hwrm_func_vf_cfg_input *
-__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
u32 enables = 0;
@@ -7161,51 +7159,48 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
return NULL;
- enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
- enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- enables |= tx_rings + ring_grps ?
+ enables |= hwr->tx + hwr->grp ?
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
} else {
- enables |= cp_rings ?
- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= ring_grps ?
+ enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= hwr->grp ?
FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
}
- enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
- req->num_tx_rings = cpu_to_le16(tx_rings);
- req->num_rx_rings = cpu_to_le16(rx_rings);
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
+ u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
- req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
+ req->num_cmpl_rings = cpu_to_le16(hwr->tx + hwr->grp);
req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
} else {
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp);
+ req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
}
- req->num_stat_ctxs = cpu_to_le16(stats);
- req->num_vnics = cpu_to_le16(vnics);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
req->enables = cpu_to_le32(enables);
return req;
}
static int
-bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
int rc;
- req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
if (!req)
return -ENOMEM;
@@ -7219,25 +7214,23 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return rc;
if (bp->hwrm_spec_code < 0x10601)
- bp->hw_resc.resv_tx_rings = tx_rings;
+ bp->hw_resc.resv_tx_rings = hwr->tx;
return bnxt_hwrm_get_rings(bp);
}
static int
-bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
int rc;
if (!BNXT_NEW_RM(bp)) {
- bp->hw_resc.resv_tx_rings = tx_rings;
+ bp->hw_resc.resv_tx_rings = hwr->tx;
return 0;
}
- req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
if (!req)
return -ENOMEM;
@@ -7248,15 +7241,12 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return bnxt_hwrm_get_rings(bp);
}
-static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
- int cp, int stat, int vnic)
+static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
if (BNXT_PF(bp))
- return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
- vnic);
+ return bnxt_hwrm_reserve_pf_rings(bp, hwr);
else
- return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
- vnic);
+ return bnxt_hwrm_reserve_vf_rings(bp, hwr);
}
int bnxt_nq_rings_in_use(struct bnxt *bp)
@@ -7352,47 +7342,60 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
return false;
}
-static int __bnxt_reserve_rings(struct bnxt *bp)
+static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- int cp = bnxt_nq_rings_in_use(bp);
- int tx = bp->tx_nr_rings;
- int rx = bp->rx_nr_rings;
- int grp, rx_rings, rc;
- int vnic = 1, stat;
+
+ hwr->tx = hw_resc->resv_tx_rings;
+ if (BNXT_NEW_RM(bp)) {
+ hwr->rx = hw_resc->resv_rx_rings;
+ hwr->cp = hw_resc->resv_irqs;
+ hwr->grp = hw_resc->resv_hw_ring_grps;
+ hwr->vnic = hw_resc->resv_vnics;
+ hwr->stat = hw_resc->resv_stat_ctxs;
+ }
+}
+
+static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
+{
+ return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
+ hwr->stat;
+}
+
+static int __bnxt_reserve_rings(struct bnxt *bp)
+{
+ struct bnxt_hw_rings hwr = {0};
+ int rx_rings, rc;
bool sh = false;
int tx_cp;
if (!bnxt_need_reserve_rings(bp))
return 0;
+ hwr.cp = bnxt_nq_rings_in_use(bp);
+ hwr.tx = bp->tx_nr_rings;
+ hwr.rx = bp->rx_nr_rings;
+ hwr.vnic = 1;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
if ((bp->flags & BNXT_FLAG_RFS) &&
!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- vnic = rx + 1;
+ hwr.vnic = hwr.rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx <<= 1;
- grp = bp->rx_nr_rings;
- stat = bnxt_get_func_stat_ctxs(bp);
+ hwr.rx <<= 1;
+ hwr.grp = bp->rx_nr_rings;
+ hwr.stat = bnxt_get_func_stat_ctxs(bp);
- rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
+ rc = bnxt_hwrm_reserve_rings(bp, &hwr);
if (rc)
return rc;
- tx = hw_resc->resv_tx_rings;
- if (BNXT_NEW_RM(bp)) {
- rx = hw_resc->resv_rx_rings;
- cp = hw_resc->resv_irqs;
- grp = hw_resc->resv_hw_ring_grps;
- vnic = hw_resc->resv_vnics;
- stat = hw_resc->resv_stat_ctxs;
- }
+ bnxt_copy_reserved_rings(bp, &hwr);
- rx_rings = rx;
+ rx_rings = hwr.rx;
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- if (rx >= 2) {
- rx_rings = rx >> 1;
+ if (hwr.rx >= 2) {
+ rx_rings = hwr.rx >> 1;
} else {
if (netif_running(bp->dev))
return -ENOMEM;
@@ -7404,17 +7407,17 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
bnxt_set_ring_params(bp);
}
}
- rx_rings = min_t(int, rx_rings, grp);
- cp = min_t(int, cp, bp->cp_nr_rings);
- if (stat > bnxt_get_ulp_stat_ctxs(bp))
- stat -= bnxt_get_ulp_stat_ctxs(bp);
- cp = min_t(int, cp, stat);
- rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
+ rx_rings = min_t(int, rx_rings, hwr.grp);
+ hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
+ if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
+ hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp = min_t(int, hwr.cp, hwr.stat);
+ rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx = rx_rings << 1;
- tx_cp = bnxt_num_tx_to_cp(bp, tx);
- cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
- bp->tx_nr_rings = tx;
+ hwr.rx = rx_rings << 1;
+ tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
+ hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
+ bp->tx_nr_rings = hwr.tx;
/* If we cannot reserve all the RX rings, reset the RSS map only
* if absolutely necessary
@@ -7431,9 +7434,9 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
}
}
bp->rx_nr_rings = rx_rings;
- bp->cp_nr_rings = cp;
+ bp->cp_nr_rings = hwr.cp;
- if (!tx || !rx || !cp || !grp || !vnic || !stat)
+ if (!bnxt_rings_ok(bp, &hwr))
return -ENOMEM;
if (!netif_is_rxfh_configured(bp->dev))
@@ -7442,9 +7445,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
return rc;
}
-static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
u32 flags;
@@ -7452,8 +7453,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (!BNXT_NEW_RM(bp))
return 0;
- req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
@@ -7467,15 +7467,12 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return hwrm_req_send_silent(bp, req);
}
-static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
u32 flags;
- req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
if (BNXT_NEW_RM(bp)) {
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
@@ -7493,20 +7490,15 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return hwrm_req_send_silent(bp, req);
}
-static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
if (bp->hwrm_spec_code < 0x10801)
return 0;
if (BNXT_PF(bp))
- return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
- ring_grps, cp_rings, stats,
- vnics);
+ return bnxt_hwrm_check_pf_rings(bp, hwr);
- return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ return bnxt_hwrm_check_vf_rings(bp, hwr);
}
static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
@@ -12342,21 +12334,22 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
- int vnics, max_vnics, max_rss_ctxs;
+ struct bnxt_hw_rings hwr = {0};
+ int max_vnics, max_rss_ctxs;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return bnxt_rfs_supported(bp);
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
- vnics = 1 + bp->rx_nr_rings;
+ hwr.vnic = 1 + bp->rx_nr_rings;
max_vnics = bnxt_get_max_func_vnics(bp);
max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
/* RSS contexts not a limiting factor */
if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
max_rss_ctxs = max_vnics;
- if (vnics > max_vnics || vnics > max_rss_ctxs) {
+ if (hwr.vnic > max_vnics || hwr.vnic > max_rss_ctxs) {
if (bp->rx_nr_rings > 1)
netdev_warn(bp->dev,
"Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
@@ -12367,15 +12360,16 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (!BNXT_NEW_RM(bp))
return true;
- if (vnics == bp->hw_resc.resv_vnics)
+ if (hwr.vnic == bp->hw_resc.resv_vnics)
return true;
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
- if (vnics <= bp->hw_resc.resv_vnics)
+ bnxt_hwrm_reserve_rings(bp, &hwr);
+ if (hwr.vnic <= bp->hw_resc.resv_vnics)
return true;
netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
+ hwr.vnic = 1;
+ bnxt_hwrm_reserve_rings(bp, &hwr);
return false;
}
@@ -13299,9 +13293,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
- int tx_rings_needed, stats;
+ struct bnxt_hw_rings hwr = {0};
int rx_rings = rx;
- int cp, vnics;
if (tcs)
tx_sets = tcs;
@@ -13314,26 +13307,26 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx_rings <<= 1;
- tx_rings_needed = tx * tx_sets + tx_xdp;
- if (max_tx < tx_rings_needed)
+ hwr.rx = rx_rings;
+ hwr.tx = tx * tx_sets + tx_xdp;
+ if (max_tx < hwr.tx)
return -ENOMEM;
- vnics = 1;
+ hwr.vnic = 1;
if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) ==
BNXT_FLAG_RFS)
- vnics += rx;
+ hwr.vnic += rx;
- tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp);
- cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
- if (max_cp < cp)
+ tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
+ hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
+ if (max_cp < hwr.cp)
return -ENOMEM;
- stats = cp;
+ hwr.stat = hwr.cp;
if (BNXT_NEW_RM(bp)) {
- cp += bnxt_get_ulp_msix_num(bp);
- stats += bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp += bnxt_get_ulp_msix_num(bp);
+ hwr.stat += bnxt_get_ulp_stat_ctxs(bp);
}
- return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
- stats, vnics);
+ return bnxt_hwrm_check_rings(bp, &hwr);
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)