Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  173
1 file changed, 137 insertions, 36 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b86f980fa7ea..7b8b5b39c7bb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -81,7 +81,6 @@ MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
-#define BNXT_RX_COPY_THRESH 256
#define BNXT_TX_PUSH_THRESH 164
@@ -1343,13 +1342,13 @@ static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
if (!skb)
return NULL;
- dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
len + NET_IP_ALIGN);
- dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
skb_put(skb, len);
@@ -1842,7 +1841,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- if (len <= bp->rx_copy_thresh) {
+ if (len <= bp->rx_copybreak) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
@@ -2176,7 +2175,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
- if (len <= bp->rx_copy_thresh) {
+ if (len <= bp->rx_copybreak) {
if (!xdp_active)
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
else
@@ -2855,6 +2854,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
__bnxt_queue_sp_work(bp);
async_event_process_exit:
+ bnxt_ulp_async_events(bp, cmpl);
return 0;
}
@@ -2897,6 +2897,13 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
return 0;
}
+static bool bnxt_vnic_is_active(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+
+ return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
+}
+
static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
struct bnxt_napi *bnapi = dev_instance;
@@ -3164,7 +3171,7 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
}
}
- if (bp->flags & BNXT_FLAG_DIM) {
+ if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
@@ -3295,7 +3302,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
poll_done:
cpr_rx = &cpr->cp_ring_arr[0];
if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
- (bp->flags & BNXT_FLAG_DIM)) {
+ (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
@@ -4601,6 +4608,17 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
bp->flags |= BNXT_FLAG_GRO;
}
+static void bnxt_init_ring_params(struct bnxt *bp)
+{
+ unsigned int rx_size;
+
+ bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
+ /* Try to fit 4 chunks into a 4k page */
+ rx_size = SZ_1K -
+ NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
+}
+
/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
* be set on entry.
*/
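As a rough userspace sketch of the default sizing in bnxt_init_ring_params() above (not driver code): NET_SKB_PAD = 64 and a ~320-byte aligned skb_shared_info are assumed typical 64-bit values, not numbers taken from this patch.

#include <stdio.h>

#define SZ_1K              1024
#define NET_SKB_PAD        64   /* assumed typical value */
#define SHINFO_ALIGNED     320  /* assumed SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
#define DEFAULT_COPYBREAK  256  /* BNXT_DEFAULT_RX_COPYBREAK in this series */

int main(void)
{
	/* Size the header buffer so data + headroom + shared info stays
	 * near 1K, i.e. roughly four buffers per 4K page as the comment
	 * in the hunk says. */
	unsigned int rx_size = SZ_1K - NET_SKB_PAD - SHINFO_ALIGNED;
	unsigned int hds_thresh = rx_size > DEFAULT_COPYBREAK ?
				  rx_size : DEFAULT_COPYBREAK;

	printf("default rx_copybreak=%u\n", DEFAULT_COPYBREAK);
	printf("rx_size=%u default hds_thresh=%u\n", rx_size, hds_thresh);
	return 0;
}

With those assumed values the default HDS threshold works out to 640 bytes while rx_copybreak stays at 256; the next hunk in bnxt_set_ring_params() then sizes the SKB buffer to cover whichever of the two is larger.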
@@ -4615,12 +4633,11 @@ void bnxt_set_ring_params(struct bnxt *bp)
rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
ring_size = bp->rx_ring_size;
bp->rx_agg_ring_size = 0;
bp->rx_agg_nr_pages = 0;
- if (bp->flags & BNXT_FLAG_TPA)
+ if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
bp->flags &= ~BNXT_FLAG_JUMBO;
@@ -4660,7 +4677,10 @@ void bnxt_set_ring_params(struct bnxt *bp)
ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
} else {
- rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak,
+ bp->dev->cfg_pending->hds_thresh);
+ rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
rx_space = rx_size + NET_SKB_PAD +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
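Continuing the same assumptions, the non-page-mode buffer computed by the hunk above must cover the largest of the default copybreak, the configured rx_copybreak, and the pending hds_thresh; a minimal sketch (64-byte cache-line alignment is again an assumption):

#include <stdio.h>

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define NET_IP_ALIGN     2
#define NET_SKB_PAD      64   /* assumed */
#define SMP_CACHE_BYTES  64   /* assumed */
#define SHINFO_ALIGNED   320  /* assumed */

static unsigned int max3_u32(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a > b ? a : b;

	return m > c ? m : c;
}

int main(void)
{
	/* defaults carried over from the sketch above */
	unsigned int copybreak = 256, hds_thresh = 640;
	unsigned int rx_size, rx_space;

	rx_size = ALIGN_UP(max3_u32(256, copybreak, hds_thresh) + NET_IP_ALIGN,
			   SMP_CACHE_BYTES);
	rx_space = rx_size + NET_SKB_PAD + SHINFO_ALIGNED;

	printf("rx_size=%u rx_space=%u\n", rx_size, rx_space);
	return 0;
}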
@@ -4701,7 +4721,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
/* Changing allocation mode of RX rings.
* TODO: Update when extending xdp_rxq_info to support allocation modes.
*/
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
struct net_device *dev = bp->dev;
@@ -4722,15 +4742,30 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
bp->rx_skb_func = bnxt_rx_page_skb;
}
bp->rx_dir = DMA_BIDIRECTIONAL;
- /* Disable LRO or GRO_HW */
- netdev_update_features(dev);
} else {
dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
bp->rx_dir = DMA_FROM_DEVICE;
bp->rx_skb_func = bnxt_rx_skb;
}
- return 0;
+}
+
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+ __bnxt_set_rx_skb_mode(bp, page_mode);
+
+ if (!page_mode) {
+ int rx, tx;
+
+ bnxt_get_max_rings(bp, &rx, &tx, true);
+ if (rx > 1) {
+ bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features |= NETIF_F_LRO;
+ }
+ }
+
+ /* Update LRO and GRO_HW availability */
+ netdev_update_features(bp->dev);
}
static void bnxt_free_vnic_attributes(struct bnxt *bp)
@@ -6557,6 +6592,7 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
+ u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
struct hwrm_vnic_plcmodes_cfg_input *req;
int rc;
@@ -6566,16 +6602,14 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+ req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
- if (BNXT_RX_PAGE_MODE(bp)) {
- req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
- } else {
+ if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
req->enables |=
cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
- req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
- req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ req->hds_threshold = cpu_to_le16(hds_thresh);
}
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
return hwrm_req_send(bp, req);
@@ -7266,6 +7300,26 @@ err_out:
return rc;
}
+static void bnxt_cancel_dim(struct bnxt *bp)
+{
+ int i;
+
+ /* DIM work is initialized in bnxt_enable_napi(). Proceed only
+ * if NAPI is enabled.
+ */
+ if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
+ return;
+
+ /* Make sure NAPI sees that the VNIC is disabled */
+ synchronize_net();
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_napi *bnapi = rxr->bnapi;
+
+ cancel_work_sync(&bnapi->cp_ring.dim.work);
+ }
+}
+
static int hwrm_ring_free_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, int cmpl_ring_id)
@@ -7366,6 +7420,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+ bnxt_cancel_dim(bp);
for (i = 0; i < bp->rx_nr_rings; i++) {
bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
@@ -8279,16 +8334,20 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (rc)
goto func_qcfg_exit;
+ flags = le16_to_cpu(resp->flags);
#ifdef CONFIG_BNXT_SRIOV
if (BNXT_VF(bp)) {
struct bnxt_vf_info *vf = &bp->vf;
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
+ if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
+ vf->flags |= BNXT_VF_TRUST;
+ else
+ vf->flags &= ~BNXT_VF_TRUST;
} else {
bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
}
#endif
- flags = le16_to_cpu(resp->flags);
if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
@@ -9117,10 +9176,18 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ena = 0;
if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
pg_lvl = 2;
- extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
- /* allocate extra qps if fw supports RoCE fast qp destroy feature */
- extra_qps += fast_qpmd_qps;
- extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (BNXT_SW_RES_LMT(bp)) {
+ extra_qps = max_qps - l2_qps - qp1_qps;
+ extra_srqs = max_srqs - srqs;
+ } else {
+ extra_qps = min_t(u32, 65536,
+ max_qps - l2_qps - qp1_qps);
+ /* allocate extra qps if fw supports RoCE fast qp
+ * destroy feature
+ */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ }
if (fast_qpmd_qps)
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
}
@@ -9156,14 +9223,20 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
goto skip_rdma;
ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
- /* 128K extra is needed to accommodate static AH context
- * allocation by f/w.
- */
- num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
- num_ah = min_t(u32, num_mr, 1024 * 128);
- ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
- if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
- ctxm->mrav_av_entries = num_ah;
+ if (BNXT_SW_RES_LMT(bp) &&
+ ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
+ num_ah = ctxm->mrav_av_entries;
+ num_mr = ctxm->max_entries - num_ah;
+ } else {
+ /* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
+ }
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
if (rc)
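Both bnxt_alloc_ctx_mem() hunks above switch from fixed caps to the firmware-advertised maximums when the new software resource-limit capability (BNXT_SW_RES_LMT) is present. A minimal userspace sketch with made-up maximums (max_qps, max_srqs and friends are assumptions, not values from the patch) showing how the two policies diverge:

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Assumed firmware-reported maximums; real values come from HWRM. */
	unsigned int max_qps = 131072, l2_qps = 1024, qp1_qps = 1;
	unsigned int fast_qpmd_qps = 0;
	unsigned int max_srqs = 16384, srqs = 512;
	int sw_res_lmt;

	for (sw_res_lmt = 0; sw_res_lmt <= 1; sw_res_lmt++) {
		unsigned int extra_qps, extra_srqs;

		if (sw_res_lmt) {
			/* firmware enforces software limits: use all that is left */
			extra_qps = max_qps - l2_qps - qp1_qps;
			extra_srqs = max_srqs - srqs;
		} else {
			/* historical driver caps of 64K QPs and 8K SRQs */
			extra_qps = min_u32(65536, max_qps - l2_qps - qp1_qps);
			extra_qps += fast_qpmd_qps;
			extra_srqs = min_u32(8192, max_srqs - srqs);
		}
		printf("sw_res_lmt=%d extra_qps=%u extra_srqs=%u\n",
		       sw_res_lmt, extra_qps, extra_srqs);
	}
	return 0;
}

The MRAV hunk applies the same idea to the MR/AH split, trusting ctxm->mrav_av_entries for the AH count instead of the historical 256K/128K caps.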
@@ -9470,6 +9543,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
+ if (flags_ext2 &
+ FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
if (BNXT_PF(bp) &&
(flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
@@ -11309,8 +11385,6 @@ static void bnxt_disable_napi(struct bnxt *bp)
if (bnapi->in_reset)
cpr->sw_stats->rx.rx_resets++;
napi_disable(&bnapi->napi);
- if (bnapi->rx_ring)
- cancel_work_sync(&cpr->dim.work);
}
}
@@ -11530,6 +11604,26 @@ hwrm_phy_qcaps_exit:
return rc;
}
+static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
+{
+ struct hwrm_port_mac_qcaps_output *resp;
+ struct hwrm_port_mac_qcaps_input *req;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10a03)
+ return;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
+ if (rc)
+ return;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (!rc)
+ bp->mac_flags = resp->flags;
+ hwrm_req_drop(bp, req);
+}
+
static bool bnxt_support_dropped(u16 advertising, u16 supported)
{
u16 diff = advertising ^ supported;
@@ -15572,8 +15666,10 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
bnxt_hwrm_vnic_update(bp, vnic,
VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
}
-
+ /* Make sure NAPI sees that the VNIC is disabled */
+ synchronize_net();
rxr = &bp->rx_ring[idx];
+ cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
bnxt_hwrm_rx_ring_free(bp, rxr, false);
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
rxr->rx_next_cons = 0;
@@ -15658,6 +15754,10 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
bp->dev->priv_flags |= IFF_SUPP_NOFCS;
else
bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
+
+ bp->mac_flags = 0;
+ bnxt_hwrm_mac_qcaps(bp);
+
if (!fw_dflt)
return 0;
@@ -16186,8 +16286,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (bp->max_fltr < BNXT_MAX_FLTR)
bp->max_fltr = BNXT_MAX_FLTR;
bnxt_init_l2_fltr_tbl(bp);
- bnxt_set_rx_skb_mode(bp, false);
+ __bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
+ bnxt_init_ring_params(bp);
bnxt_set_ring_params(bp);
bnxt_rdma_aux_device_init(bp);
rc = bnxt_set_dflt_rings(bp, true);