Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c         | 341
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h         |  30
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h     |   5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c |  56
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h     | 289
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c   |  61
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h   |   3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c     | 144
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h     |  22
9 files changed, 716 insertions, 235 deletions
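One recurring theme in this diff is worth illustrating outside the patch itself: the new completion-ring to MSI-X vector mapping. Once a ULP such as the RoCE driver owns a block of vectors, L2 completion rings at or above the ULP base are shifted past it (see the ring->map_idx logic added in bnxt_alloc_cp_rings() below). The following stand-alone C sketch replays that rule in user space; it is not driver code, and the ring counts and ULP values are made-up examples.

/*
 * Illustrative user-space sketch of the completion-ring to MSI-X vector
 * mapping added in bnxt_alloc_cp_rings().  All values are hypothetical.
 */
#include <stdio.h>

static int map_idx(int i, int ulp_msix, int ulp_base_vec)
{
    /* Same rule as the patch: rings below the ULP base keep their
     * index; rings at or above it skip over the ULP vectors.
     */
    if (ulp_msix && i >= ulp_base_vec)
        return i + ulp_msix;
    return i;
}

int main(void)
{
    int cp_nr_rings = 8;    /* hypothetical L2 completion rings */
    int ulp_msix = 2;       /* hypothetical vectors given to RoCE */
    int ulp_base_vec = 6;   /* hypothetical first ULP vector */

    for (int i = 0; i < cp_nr_rings; i++)
        printf("cp ring %d -> MSI-X vector %d\n",
               i, map_idx(i, ulp_msix, ulp_base_vec));
    return 0;
}

With these example values, rings 0-5 keep vectors 0-5 while rings 6 and 7 map to vectors 8 and 9, leaving vectors 6 and 7 to the ULP — the same shape the driver achieves via bnxt_cp_num_to_irq_num() when indexing bp->irq_tbl.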
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c7e5e6f09647..1991f0c7bc0e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1922,7 +1922,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 		/* Sync BD data before updating doorbell */
 		wmb();
 
-		bnxt_db_write(bp, db, DB_KEY_TX | prod);
+		bnxt_db_write_relaxed(bp, db, DB_KEY_TX | prod);
 	}
 
 	cpr->cp_raw_cons = raw_cons;
@@ -2317,6 +2317,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
 		if (rc)
 			return rc;
 
+		ring->grp_idx = i;
 		rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
 		mem_size = rxr->rx_agg_bmap_size / 8;
 		rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
@@ -2389,6 +2390,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
 		if (rc)
 			return rc;
 
+		ring->grp_idx = txr->bnapi->index;
 		if (bp->tx_push_size) {
 			dma_addr_t mapping;
 
@@ -2442,8 +2444,10 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
 
 static int bnxt_alloc_cp_rings(struct bnxt *bp)
 {
-	int i, rc;
+	int i, rc, ulp_base_vec, ulp_msix;
 
+	ulp_msix = bnxt_get_ulp_msix_num(bp);
+	ulp_base_vec = bnxt_get_ulp_msix_base(bp);
 	for (i = 0; i < bp->cp_nr_rings; i++) {
 		struct bnxt_napi *bnapi = bp->bnapi[i];
 		struct bnxt_cp_ring_info *cpr;
@@ -2458,6 +2462,11 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
 		rc = bnxt_alloc_ring(bp, ring);
 		if (rc)
 			return rc;
+
+		if (ulp_msix && i >= ulp_base_vec)
+			ring->map_idx = i + ulp_msix;
+		else
+			ring->map_idx = i;
 	}
 	return 0;
 }
@@ -3059,12 +3068,21 @@ static void bnxt_free_stats(struct bnxt *bp)
 	u32 size, i;
 	struct pci_dev *pdev = bp->pdev;
 
+	bp->flags &= ~BNXT_FLAG_PORT_STATS;
+	bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
+
 	if (bp->hw_rx_port_stats) {
 		dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
 				  bp->hw_rx_port_stats,
 				  bp->hw_rx_port_stats_map);
 		bp->hw_rx_port_stats = NULL;
-		bp->flags &= ~BNXT_FLAG_PORT_STATS;
+	}
+
+	if (bp->hw_rx_port_stats_ext) {
+		dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
+				  bp->hw_rx_port_stats_ext,
+				  bp->hw_rx_port_stats_ext_map);
+		bp->hw_rx_port_stats_ext = NULL;
 	}
 
 	if (!bp->bnapi)
@@ -3120,6 +3138,21 @@ static int bnxt_alloc_stats(struct bnxt *bp)
 		bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
 					   sizeof(struct rx_port_stats) + 512;
 		bp->flags |= BNXT_FLAG_PORT_STATS;
+
+		/* Display extended statistics only if FW supports it */
+		if (bp->hwrm_spec_code < 0x10804 ||
+		    bp->hwrm_spec_code == 0x10900)
+			return 0;
+
+		bp->hw_rx_port_stats_ext =
+			dma_zalloc_coherent(&pdev->dev,
+					    sizeof(struct rx_port_stats_ext),
+					    &bp->hw_rx_port_stats_ext_map,
+					    GFP_KERNEL);
+		if (!bp->hw_rx_port_stats_ext)
+			return 0;
+
+		bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
 	}
 	return 0;
 }
@@ -3357,6 +3390,15 @@ static void bnxt_disable_int(struct bnxt *bp)
 	}
 }
 
+static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
+{
+	struct bnxt_napi *bnapi = bp->bnapi[n];
+	struct bnxt_cp_ring_info *cpr;
+
+	cpr = &bnapi->cp_ring;
+	return cpr->cp_ring_struct.map_idx;
+}
+
 static void bnxt_disable_int_sync(struct bnxt *bp)
 {
 	int i;
@@ -3364,8 +3406,11 @@ static void bnxt_disable_int_sync(struct bnxt *bp)
 
 	atomic_inc(&bp->intr_sem);
 	bnxt_disable_int(bp);
-	for (i = 0; i < bp->cp_nr_rings; i++)
-		synchronize_irq(bp->irq_tbl[i].vector);
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
+
+		synchronize_irq(bp->irq_tbl[map_idx].vector);
+	}
 }
 
 static void bnxt_enable_int(struct bnxt *bp)
@@ -3398,7 +3443,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 	int i, intr_process, rc, tmo_count;
 	struct input *req = msg;
 	u32 *data = msg;
-	__le32 *resp_len, *valid;
+	__le32 *resp_len;
+	u8 *valid;
 	u16 cp_ring_id, len = 0;
 	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
 	u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
@@ -3450,6 +3496,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 
 	i = 0;
 	tmo_count = timeout * 40;
+	resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
 	if (intr_process) {
 		/* Wait until hwrm response cmpl interrupt is processed */
 		while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
@@ -3462,9 +3509,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 				   le16_to_cpu(req->req_type));
 			return -1;
 		}
+		len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
+		      HWRM_RESP_LEN_SFT;
+		valid = bp->hwrm_cmd_resp_addr + len - 1;
 	} else {
 		/* Check if response len is updated */
-		resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
 		for (i = 0; i < tmo_count; i++) {
 			len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
 			      HWRM_RESP_LEN_SFT;
@@ -3480,10 +3529,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 			return -1;
 		}
 
-		/* Last word of resp contains valid bit */
-		valid = bp->hwrm_cmd_resp_addr + len - 4;
+		/* Last byte of resp contains valid bit */
+		valid = bp->hwrm_cmd_resp_addr + len - 1;
 		for (i = 0; i < 5; i++) {
-			if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
+			/* make sure we read from updated DMA memory */
+			dma_rmb();
+			if (*valid)
 				break;
 			udelay(1);
 		}
@@ -3496,6 +3547,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 		}
 	}
 
+	/* Zero valid bit for compatibility.  Valid bit in an older spec
+	 * may become a new field in a newer spec.  We must make sure that
+	 * a new field not implemented by old spec will read zero.
+	 */
+	*valid = 0;
 	rc = le16_to_cpu(resp->error_code);
 	if (rc && !silent)
 		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
@@ -3577,9 +3633,13 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
 			    FUNC_DRV_RGTR_REQ_ENABLES_VER);
 
 	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
-	req.ver_maj = DRV_VER_MAJ;
-	req.ver_min = DRV_VER_MIN;
-	req.ver_upd = DRV_VER_UPD;
+	req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
+	req.ver_maj_8b = DRV_VER_MAJ;
+	req.ver_min_8b = DRV_VER_MIN;
+	req.ver_upd_8b = DRV_VER_UPD;
+	req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
+	req.ver_min = cpu_to_le16(DRV_VER_MIN);
+	req.ver_upd = cpu_to_le16(DRV_VER_UPD);
 
 	if (BNXT_PF(bp)) {
 		u32 data[8];
@@ -3998,6 +4058,13 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
 	return rc;
 }
 
+static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
+{
+	if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
+		return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
+	return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
+}
+
 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
 {
 	unsigned int ring = 0, grp_idx;
@@ -4053,8 +4120,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
 	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
 		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
 	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
-		req.flags |=
-			cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
+		req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
 
 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 }
@@ -4135,9 +4201,13 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
 	mutex_lock(&bp->hwrm_cmd_lock);
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (!rc) {
-		if (resp->flags &
-		    cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
+		u32 flags = le32_to_cpu(resp->flags);
+
+		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)
 			bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
+		if (flags &
+		    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
+			bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
 	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
 	return rc;
@@ -4204,12 +4274,12 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
 
 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
 				    struct bnxt_ring_struct *ring,
-				    u32 ring_type, u32 map_index,
-				    u32 stats_ctx_id)
+				    u32 ring_type, u32 map_index)
 {
 	int rc = 0, err = 0;
 	struct hwrm_ring_alloc_input req = {0};
 	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	struct bnxt_ring_grp_info *grp_info;
 	u16 ring_id;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
@@ -4231,10 +4301,10 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
 	case HWRM_RING_ALLOC_TX:
 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
 		/* Association of transmit ring with completion ring */
-		req.cmpl_ring_id =
-			cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
+		grp_info = &bp->grp_info[ring->grp_idx];
+		req.cmpl_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
 		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
-		req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
+		req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
 		req.queue_id = cpu_to_le16(ring->queue_id);
 		break;
 	case HWRM_RING_ALLOC_RX:
@@ -4321,10 +4391,11 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 		struct bnxt_napi *bnapi = bp->bnapi[i];
 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+		u32 map_idx = ring->map_idx;
 
-		cpr->cp_doorbell = bp->bar1 + i * 0x80;
-		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
-					      INVALID_STATS_CTX_ID);
+		cpr->cp_doorbell = bp->bar1 + map_idx * 0x80;
+		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL,
+					      map_idx);
 		if (rc)
 			goto err_out;
 		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
@@ -4340,11 +4411,10 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 	for (i = 0; i < bp->tx_nr_rings; i++) {
 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
-		u32 map_idx = txr->bnapi->index;
-		u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
+		u32 map_idx = i;
 
 		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
-					      map_idx, fw_stats_ctx);
+					      map_idx);
 		if (rc)
 			goto err_out;
 		txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
@@ -4356,7 +4426,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 		u32 map_idx = rxr->bnapi->index;
 
 		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
-					      map_idx, INVALID_STATS_CTX_ID);
+					      map_idx);
 		if (rc)
 			goto err_out;
 		rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
@@ -4369,13 +4439,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
 			struct bnxt_ring_struct *ring =
 						&rxr->rx_agg_ring_struct;
-			u32 grp_idx = rxr->bnapi->index;
+			u32 grp_idx = ring->grp_idx;
 			u32 map_idx = grp_idx + bp->rx_nr_rings;
 
 			rc = hwrm_ring_alloc_send_msg(bp, ring,
 						      HWRM_RING_ALLOC_AGG,
-						      map_idx,
-						      INVALID_STATS_CTX_ID);
+						      map_idx);
 			if (rc)
 				goto err_out;
 
@@ -4669,20 +4738,59 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
 	return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
 }
 
+static int bnxt_cp_rings_in_use(struct bnxt *bp)
+{
+	int cp = bp->cp_nr_rings;
+	int ulp_msix, ulp_base;
+
+	ulp_msix = bnxt_get_ulp_msix_num(bp);
+	if (ulp_msix) {
+		ulp_base = bnxt_get_ulp_msix_base(bp);
+		cp += ulp_msix;
+		if ((ulp_base + ulp_msix) > cp)
+			cp = ulp_base + ulp_msix;
+	}
+	return cp;
+}
+
+static bool bnxt_need_reserve_rings(struct bnxt *bp)
+{
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	int cp = bnxt_cp_rings_in_use(bp);
+	int rx = bp->rx_nr_rings;
+	int vnic = 1, grp = rx;
+
+	if (bp->hwrm_spec_code < 0x10601)
+		return false;
+
+	if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
+		return true;
+
+	if (bp->flags & BNXT_FLAG_RFS)
+		vnic = rx + 1;
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		rx <<= 1;
+	if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+	    (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
+	     hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic))
+		return true;
+	return false;
+}
+
 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
 			   bool shared);
 
 static int __bnxt_reserve_rings(struct bnxt *bp)
 {
 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	int cp = bnxt_cp_rings_in_use(bp);
 	int tx = bp->tx_nr_rings;
 	int rx = bp->rx_nr_rings;
-	int cp = bp->cp_nr_rings;
 	int grp, rx_rings, rc;
 	bool sh = false;
 	int vnic = 1;
 
-	if (bp->hwrm_spec_code < 0x10601)
+	if (!bnxt_need_reserve_rings(bp))
 		return 0;
 
 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -4691,14 +4799,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
 		vnic = rx + 1;
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		rx <<= 1;
 	grp = bp->rx_nr_rings;
-	if (tx == hw_resc->resv_tx_rings &&
-	    (!(bp->flags & BNXT_FLAG_NEW_RM) ||
-	     (rx == hw_resc->resv_rx_rings &&
-	      grp == hw_resc->resv_hw_ring_grps &&
-	      cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics)))
-		return 0;
 
 	rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
 	if (rc)
@@ -4742,30 +4843,6 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
 	return rc;
 }
 
-static bool bnxt_need_reserve_rings(struct bnxt *bp)
-{
-	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-	int rx = bp->rx_nr_rings;
-	int vnic = 1;
-
-	if (bp->hwrm_spec_code < 0x10601)
-		return false;
-
-	if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
-		return true;
-
-	if (bp->flags & BNXT_FLAG_RFS)
-		vnic = rx + 1;
-	if (bp->flags & BNXT_FLAG_AGG_RINGS)
-		rx <<= 1;
-	if ((bp->flags & BNXT_FLAG_NEW_RM) &&
-	    (hw_resc->resv_rx_rings != rx ||
-	     hw_resc->resv_cp_rings != bp->cp_nr_rings ||
-	     hw_resc->resv_vnics != vnic))
-		return true;
-	return false;
-}
-
 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 				    int ring_grps, int cp_rings, int vnics)
 {
@@ -5055,7 +5132,7 @@ func_qcfg_exit:
 	return rc;
 }
 
-static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
+int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
 {
 	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
 	struct hwrm_func_resource_qcaps_input req = {0};
@@ -5072,6 +5149,10 @@ static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
 		goto hwrm_func_resc_qcaps_exit;
 	}
 
+	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
+	if (!all)
+		goto hwrm_func_resc_qcaps_exit;
+
 	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
 	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
 	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
@@ -5178,7 +5259,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	if (rc)
 		return rc;
 	if (bp->hwrm_spec_code >= 0x10803) {
-		rc = bnxt_hwrm_func_resc_qcaps(bp);
+		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
 		if (!rc)
 			bp->flags |= BNXT_FLAG_NEW_RM;
 	}
@@ -5326,6 +5407,21 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp)
 	return rc;
 }
 
+static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
+{
+	struct hwrm_port_qstats_ext_input req = {0};
+	struct bnxt_pf_info *pf = &bp->pf;
+
+	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
+	req.port_id = cpu_to_le16(pf->port_id);
+	req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
+	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
 {
 	if (bp->vxlan_port_cnt) {
@@ -5418,10 +5514,9 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 	req.fid = cpu_to_le16(0xffff);
 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
-	req.cache_linesize = FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64;
+	req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
 	if (size == 128)
-		req.cache_linesize =
-			FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128;
+		req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
 
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
@@ -5740,6 +5835,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
 	}
 
 	for (i = 0; i < bp->cp_nr_rings; i++) {
+		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
 		char *attr;
 
 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -5749,9 +5845,9 @@ static void bnxt_setup_msix(struct bnxt *bp)
 		else
 			attr = "tx";
 
-		snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
-			 i);
-		bp->irq_tbl[i].handler = bnxt_msix;
+		snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
+			 attr, i);
+		bp->irq_tbl[map_idx].handler = bnxt_msix;
 	}
 }
 
@@ -5812,7 +5908,7 @@ void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
 	bp->hw_resc.max_cp_rings = max;
 }
 
-static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
 {
 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 
@@ -5824,12 +5920,44 @@ void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
 	bp->hw_resc.max_irqs = max_irqs;
 }
 
+int bnxt_get_avail_msix(struct bnxt *bp, int num)
+{
+	int max_cp = bnxt_get_max_func_cp_rings(bp);
+	int max_irq = bnxt_get_max_func_irqs(bp);
+	int total_req = bp->cp_nr_rings + num;
+	int max_idx, avail_msix;
+
+	max_idx = min_t(int, bp->total_irqs, max_cp);
+	avail_msix = max_idx - bp->cp_nr_rings;
+	if (!(bp->flags & BNXT_FLAG_NEW_RM) || avail_msix >= num)
+		return avail_msix;
+
+	if (max_irq < total_req) {
+		num = max_irq - bp->cp_nr_rings;
+		if (num <= 0)
+			return 0;
+	}
+	return num;
+}
+
+static int bnxt_get_num_msix(struct bnxt *bp)
+{
+	if (!(bp->flags & BNXT_FLAG_NEW_RM))
+		return bnxt_get_max_func_irqs(bp);
+
+	return bnxt_cp_rings_in_use(bp);
+}
+
 static int bnxt_init_msix(struct bnxt *bp)
 {
-	int i, total_vecs, rc = 0, min = 1;
+	int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
 	struct msix_entry *msix_ent;
 
-	total_vecs = bnxt_get_max_func_irqs(bp);
+	total_vecs = bnxt_get_num_msix(bp);
+	max = bnxt_get_max_func_irqs(bp);
+	if (total_vecs > max)
+		total_vecs = max;
+
 	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
 	if (!msix_ent)
 		return -ENOMEM;
@@ -5843,7 +5971,8 @@ static int bnxt_init_msix(struct bnxt *bp)
 		min = 2;
 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
-	if (total_vecs < 0) {
+	ulp_msix = bnxt_get_ulp_msix_num(bp);
+	if (total_vecs < 0 || total_vecs < ulp_msix) {
 		rc = -ENODEV;
 		goto msix_setup_exit;
 	}
@@ -5856,7 +5985,7 @@ static int bnxt_init_msix(struct bnxt *bp)
 		bp->total_irqs = total_vecs;
 		/* Trim rings based upon num of vectors allocated */
 		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
-				     total_vecs, min == 1);
+				     total_vecs - ulp_msix, min == 1);
 		if (rc)
 			goto msix_setup_exit;
 
@@ -5920,9 +6049,8 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
 	bp->flags &= ~BNXT_FLAG_USING_MSIX;
 }
 
-static int bnxt_reserve_rings(struct bnxt *bp)
+int bnxt_reserve_rings(struct bnxt *bp)
 {
-	int orig_cp = bp->hw_resc.resv_cp_rings;
 	int tcs = netdev_get_num_tc(bp->dev);
 	int rc;
 
@@ -5934,9 +6062,12 @@ static int bnxt_reserve_rings(struct bnxt *bp)
 		netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
 		return rc;
 	}
-	if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) {
+	if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+	    (bnxt_get_num_msix(bp) != bp->total_irqs)) {
+		bnxt_ulp_irq_stop(bp);
 		bnxt_clear_int_mode(bp);
 		rc = bnxt_init_int_mode(bp);
+		bnxt_ulp_irq_restart(bp, rc);
 		if (rc)
 			return rc;
 	}
@@ -5963,7 +6094,9 @@ static void bnxt_free_irq(struct bnxt *bp)
 		return;
 
 	for (i = 0; i < bp->cp_nr_rings; i++) {
-		irq = &bp->irq_tbl[i];
+		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
+
+		irq = &bp->irq_tbl[map_idx];
 		if (irq->requested) {
 			if (irq->have_cpumask) {
 				irq_set_affinity_hint(irq->vector, NULL);
@@ -5982,14 +6115,25 @@ static int bnxt_request_irq(struct bnxt *bp)
 	int i, j, rc = 0;
 	unsigned long flags = 0;
 #ifdef CONFIG_RFS_ACCEL
-	struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
+	struct cpu_rmap *rmap;
 #endif
 
+	rc = bnxt_setup_int_mode(bp);
+	if (rc) {
+		netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
+			   rc);
+		return rc;
+	}
+#ifdef CONFIG_RFS_ACCEL
+	rmap = bp->dev->rx_cpu_rmap;
+#endif
 	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
 		flags = IRQF_SHARED;
 
 	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
-		struct bnxt_irq *irq = &bp->irq_tbl[i];
+		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
+		struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
+
#ifdef CONFIG_RFS_ACCEL
 		if (rmap && bp->bnapi[i]->rx_ring) {
 			rc = irq_cpu_rmap_add(rmap, irq->vector);
@@ -6709,13 +6853,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 		rc = bnxt_reserve_rings(bp);
 		if (rc)
 			return rc;
-
-		rc = bnxt_setup_int_mode(bp);
-		if (rc) {
-			netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
-				   rc);
-			return rc;
-		}
 	}
 	if ((bp->flags & BNXT_FLAG_RFS) &&
 	    !(bp->flags & BNXT_FLAG_USING_MSIX)) {
@@ -7478,8 +7615,10 @@ static void bnxt_sp_task(struct work_struct *work)
 		bnxt_hwrm_tunnel_dst_port_free(
 			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
 	}
-	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
+	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
 		bnxt_hwrm_port_qstats(bp);
+		bnxt_hwrm_port_qstats_ext(bp);
+	}
 
 	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
 		int rc;
@@ -8193,6 +8332,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
 	.ndo_set_vf_rate	= bnxt_set_vf_bw,
 	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
 	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
+	.ndo_set_vf_trust	= bnxt_set_vf_trust,
 #endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= bnxt_poll_controller,
@@ -8390,9 +8530,15 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
 	if (sh)
 		bp->flags |= BNXT_FLAG_SHARED_RINGS;
 	dflt_rings = netif_get_num_default_rss_queues();
-	/* Reduce default rings to reduce memory usage on multi-port cards */
-	if (bp->port_count > 1)
-		dflt_rings = min_t(int, dflt_rings, 4);
+	/* Reduce default rings on multi-port cards so that total default
+	 * rings do not exceed CPU count.
+	 */
+	if (bp->port_count > 1) {
+		int max_rings =
+			max_t(int, num_online_cpus() / bp->port_count, 1);
+
+		dflt_rings = min_t(int, dflt_rings, max_rings);
+	}
 	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
 	if (rc)
 		return rc;
@@ -8431,16 +8577,15 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
 	int rc;
 
 	ASSERT_RTNL();
-	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
-		return 0;
-
 	bnxt_hwrm_func_qcaps(bp);
 
 	if (netif_running(bp->dev))
 		__bnxt_close_nic(bp, true, false);
 
+	bnxt_ulp_irq_stop(bp);
 	bnxt_clear_int_mode(bp);
 	rc = bnxt_init_int_mode(bp);
+	bnxt_ulp_irq_restart(bp, rc);
 
 	if (netif_running(bp->dev)) {
 		if (rc)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 5e3d62189cab..3d55d3b56865 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,11 +12,11 @@
 #define BNXT_H
 
 #define DRV_MODULE_NAME		"bnxt_en"
-#define DRV_MODULE_VERSION	"1.9.0"
+#define DRV_MODULE_VERSION	"1.9.1"
 
 #define DRV_VER_MAJ	1
 #define DRV_VER_MIN	9
-#define DRV_VER_UPD	0
+#define DRV_VER_UPD	1
 
 #include <linux/interrupt.h>
 #include <linux/rhashtable.h>
@@ -573,6 +573,10 @@ struct bnxt_ring_struct {
 	void		**vmem;
 
 	u16		fw_ring_id; /* Ring id filled by Chimp FW */
+	union {
+		u16		grp_idx;
+		u16		map_idx; /* Used by cmpl rings */
+	};
 	u8		queue_id;
 };
 
@@ -786,6 +790,7 @@ struct bnxt_hw_resc {
 	u16	min_tx_rings;
 	u16	max_tx_rings;
 	u16	resv_tx_rings;
+	u16	max_tx_sch_inputs;
 	u16	min_rx_rings;
 	u16	max_rx_rings;
 	u16	resv_rx_rings;
@@ -815,6 +820,7 @@ struct bnxt_vf_info {
#define BNXT_VF_SPOOFCHK	0x2
#define BNXT_VF_LINK_FORCED	0x4
#define BNXT_VF_LINK_UP		0x8
+#define BNXT_VF_TRUST		0x10
 	u32	func_flags; /* func cfg flags */
 	u32	min_tx_rate;
 	u32	max_tx_rate;
@@ -1151,7 +1157,9 @@ struct bnxt {
 	#define BNXT_FLAG_FW_DCBX_AGENT	0x800000
 	#define BNXT_FLAG_CHIP_NITRO_A0	0x1000000
 	#define BNXT_FLAG_DIM		0x2000000
+	#define BNXT_FLAG_ROCE_MIRROR_CAP	0x4000000
 	#define BNXT_FLAG_NEW_RM	0x8000000
+	#define BNXT_FLAG_PORT_STATS_EXT	0x10000000
 
 	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |		\
 					    BNXT_FLAG_RFS |		\
@@ -1271,8 +1279,10 @@ struct bnxt {
 
 	struct rx_port_stats	*hw_rx_port_stats;
 	struct tx_port_stats	*hw_tx_port_stats;
+	struct rx_port_stats_ext	*hw_rx_port_stats_ext;
 	dma_addr_t		hw_rx_port_stats_map;
 	dma_addr_t		hw_tx_port_stats_map;
+	dma_addr_t		hw_rx_port_stats_ext_map;
 	int			hw_port_stats_size;
 
 	u16			hwrm_max_req_len;
@@ -1383,6 +1393,9 @@ struct bnxt {
 	((offsetof(struct tx_port_stats, counter) +	\
 	  sizeof(struct rx_port_stats) + 512) / 8)
 
+#define BNXT_RX_STATS_EXT_OFFSET(counter)		\
+	(offsetof(struct rx_port_stats_ext, counter) / 8)
+
 #define I2C_DEV_ADDR_A0				0xa0
 #define I2C_DEV_ADDR_A2				0xa2
 #define SFP_EEPROM_SFF_8472_COMP_ADDR		0x5e
@@ -1402,6 +1415,15 @@ static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
 		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
 }
 
+/* For TX and RX ring doorbells with no ordering guarantee*/
+static inline void bnxt_db_write_relaxed(struct bnxt *bp, void __iomem *db,
+					 u32 val)
+{
+	writel_relaxed(val, db);
+	if (bp->flags & BNXT_FLAG_DOUBLE_DB)
+		writel_relaxed(val, db);
+}
+
 /* For TX and RX ring doorbells */
 static inline void bnxt_db_write(struct bnxt *bp, void __iomem *db, u32 val)
 {
@@ -1432,13 +1454,17 @@ unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
 void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
+unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
 void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
+int bnxt_get_avail_msix(struct bnxt *bp, int num);
+int bnxt_reserve_rings(struct bnxt *bp);
 void bnxt_tx_disable(struct bnxt *bp);
 void bnxt_tx_enable(struct bnxt *bp);
 int bnxt_hwrm_set_pause(struct bnxt *);
 int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
+int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
 int bnxt_hwrm_fw_set_time(struct bnxt *);
 int bnxt_open_nic(struct bnxt *, bool, bool);
 int bnxt_half_open_nic(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index d2e0af960bf5..69efde785f23 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -1,7 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -34,7 +34,8 @@ struct bnxt_cos2bw_cfg {
 };
 
 #define BNXT_LLQ(q_profile)	\
-	((q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS)
+	((q_profile) ==		\
+	 QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE)
 
 #define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL	0x0300
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 1801582076be..8d8ccd67e0e2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -137,6 +137,9 @@ reset_coalesce:
 #define BNXT_TX_STATS_ENTRY(counter)	\
 	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
 
+#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
+	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
+
 static const struct {
 	long offset;
 	char string[ETH_GSTRING_LEN];
@@ -181,6 +184,8 @@ static const struct {
 	BNXT_RX_STATS_ENTRY(rx_bytes),
 	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
 	BNXT_RX_STATS_ENTRY(rx_runt_frames),
+	BNXT_RX_STATS_ENTRY(rx_stat_discard),
+	BNXT_RX_STATS_ENTRY(rx_stat_err),
 
 	BNXT_TX_STATS_ENTRY(tx_64b_frames),
 	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
@@ -216,9 +221,24 @@ static const struct {
 	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
 	BNXT_TX_STATS_ENTRY(tx_total_collisions),
 	BNXT_TX_STATS_ENTRY(tx_bytes),
+	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
+	BNXT_TX_STATS_ENTRY(tx_stat_discard),
+	BNXT_TX_STATS_ENTRY(tx_stat_error),
+};
+
+static const struct {
+	long offset;
+	char string[ETH_GSTRING_LEN];
+} bnxt_port_stats_ext_arr[] = {
+	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
+	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
+	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
+	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
+	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
 };
 
 #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
+#define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr)
 
 static int bnxt_get_num_stats(struct bnxt *bp)
 {
@@ -227,6 +247,9 @@ static int bnxt_get_num_stats(struct bnxt *bp)
 	if (bp->flags & BNXT_FLAG_PORT_STATS)
 		num_stats += BNXT_NUM_PORT_STATS;
 
+	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT)
+		num_stats += BNXT_NUM_PORT_STATS_EXT;
+
 	return num_stats;
 }
 
@@ -274,6 +297,14 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
 					   bnxt_port_stats_arr[i].offset));
 		}
 	}
+	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
+		__le64 *port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
+
+		for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++, j++) {
+			buf[j] = le64_to_cpu(*(port_stats_ext +
+					bnxt_port_stats_ext_arr[i].offset));
+		}
+	}
 }
 
@@ -334,6 +365,12 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 				buf += ETH_GSTRING_LEN;
 			}
 		}
+		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
+			for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++) {
+				strcpy(buf, bnxt_port_stats_ext_arr[i].string);
+				buf += ETH_GSTRING_LEN;
+			}
+		}
 		break;
 	case ETH_SS_TEST:
 		if (bp->num_tests)
@@ -388,15 +425,26 @@ static void bnxt_get_channels(struct net_device *dev,
 			      struct ethtool_channels *channel)
 {
 	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 	int max_rx_rings, max_tx_rings, tcs;
+	int max_tx_sch_inputs;
+
+	/* Get the most up-to-date max_tx_sch_inputs. */
+	if (bp->flags & BNXT_FLAG_NEW_RM)
+		bnxt_hwrm_func_resc_qcaps(bp, false);
+	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
 
 	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
+	if (max_tx_sch_inputs)
+		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
 	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
 
 	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
 		max_rx_rings = 0;
 		max_tx_rings = 0;
 	}
+	if (max_tx_sch_inputs)
+		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
 
 	tcs = netdev_get_num_tc(dev);
 	if (tcs > 1)
@@ -2535,16 +2583,20 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
 			return -EOPNOTSUPP;
 
 		rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
-		if (!rc)
+		if (!rc) {
 			netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
+			*flags = 0;
+		}
 	} else if (*flags == ETH_RESET_AP) {
 		/* This feature is not supported in older firmware versions */
 		if (bp->hwrm_spec_code < 0x10803)
 			return -EOPNOTSUPP;
 
 		rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
-		if (!rc)
+		if (!rc) {
 			netdev_info(dev, "Reset Application Processor request successful.\n");
+			*flags = 0;
+		}
 	} else {
 		rc = -EINVAL;
 	}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 82d17f8cc0db..0fe0ea8dce6c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -188,6 +188,7 @@ struct cmd_nums {
	#define HWRM_STAT_CTX_FREE                        0xb1UL
	#define HWRM_STAT_CTX_QUERY                       0xb2UL
	#define HWRM_STAT_CTX_CLR_STATS                   0xb3UL
+	#define HWRM_PORT_QSTATS_EXT                      0xb4UL
	#define HWRM_FW_RESET                             0xc0UL
	#define HWRM_FW_QSTATUS                           0xc1UL
	#define HWRM_FW_SET_TIME                          0xc8UL
@@ -199,6 +200,7 @@ struct cmd_nums {
	#define HWRM_REJECT_FWD_RESP                      0xd1UL
	#define HWRM_FWD_RESP                             0xd2UL
	#define HWRM_FWD_ASYNC_EVENT_CMPL                 0xd3UL
+	#define HWRM_OEM_CMD                              0xd4UL
	#define HWRM_TEMP_MONITOR_QUERY                   0xe0UL
	#define HWRM_WOL_FILTER_ALLOC                     0xf0UL
	#define HWRM_WOL_FILTER_FREE                      0xf1UL
@@ -271,6 +273,7 @@ struct cmd_nums {
	#define HWRM_SELFTEST_EXEC                        0x201UL
	#define HWRM_SELFTEST_IRQ                         0x202UL
	#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA        0x203UL
+	#define HWRM_PCIE_QSTATS                          0x204UL
	#define HWRM_DBG_READ_DIRECT                      0xff10UL
	#define HWRM_DBG_READ_INDIRECT                    0xff11UL
	#define HWRM_DBG_WRITE_DIRECT                     0xff12UL
@@ -341,9 +344,9 @@ struct hwrm_err_output {
 #define HWRM_RESP_VALID_KEY 1
 #define HWRM_VERSION_MAJOR 1
 #define HWRM_VERSION_MINOR 9
-#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_RSVD 0
-#define HWRM_VERSION_STR "1.9.0.0"
+#define HWRM_VERSION_UPDATE 1
+#define HWRM_VERSION_RSVD 15
+#define HWRM_VERSION_STR "1.9.1.15"
 
 /* hwrm_ver_get_input (size:192b/24B) */
 struct hwrm_ver_get_input {
@@ -616,30 +619,6 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change {
	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
 };
 
-/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */
-struct hwrm_async_event_cmpl_pf_drvr_unload {
-	__le16	type;
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK            0x3fUL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT             0
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST            ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT
-	__le16	event_id;
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST          ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD
-	__le32	event_data2;
-	u8	opaque_v;
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V          0x1UL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
-	u8	timestamp_lo;
-	__le16	timestamp_hi;
-	__le32	event_data1;
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
-};
-
 /* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
 struct hwrm_async_event_cmpl_vf_cfg_change {
 	__le16	type;
@@ -854,6 +833,7 @@ struct hwrm_func_qcaps_output {
	#define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED   0x2000UL
	#define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED     0x4000UL
	#define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED    0x8000UL
+	#define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED        0x10000UL
 	u8	mac_address[6];
 	__le16	max_rsscos_ctx;
 	__le16	max_cmpl_rings;
@@ -966,10 +946,14 @@ struct hwrm_func_qcfg_output {
	#define FUNC_QCFG_RESP_EVB_MODE_VEB  0x1UL
	#define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
	#define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
-	u8	cache_linesize;
-	#define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64  0x0UL
-	#define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
-	#define FUNC_QCFG_RESP_CACHE_LINESIZE_LAST              FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128
+	u8	options;
+	#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK     0x3UL
+	#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT      0
+	#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64  0x0UL
+	#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+	#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST    FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
+	#define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK               0xfcUL
+	#define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT                2
 	__le16	alloc_vfs;
 	__le32	alloc_mcast_filters;
 	__le32	alloc_hw_ring_grps;
@@ -1124,10 +1108,14 @@ struct hwrm_func_cfg_input {
	#define FUNC_CFG_REQ_EVB_MODE_VEB  0x1UL
	#define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
	#define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
-	u8	cache_linesize;
-	#define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_64  0x0UL
-	#define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
-	#define FUNC_CFG_REQ_CACHE_LINESIZE_LAST              FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128
+	u8	options;
+	#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK     0x3UL
+	#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT      0
+	#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64  0x0UL
+	#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+	#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST    FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
+	#define FUNC_CFG_REQ_OPTIONS_RSVD_MASK               0xfcUL
+	#define FUNC_CFG_REQ_OPTIONS_RSVD_SFT                2
 	__le16	num_mcast_filters;
 };
 
@@ -1248,7 +1236,7 @@ struct hwrm_func_vf_vnic_ids_query_output {
 	u8	valid;
 };
 
-/* hwrm_func_drv_rgtr_input (size:832b/104B) */
+/* hwrm_func_drv_rgtr_input (size:896b/112B) */
 struct hwrm_func_drv_rgtr_input {
 	__le16	req_type;
 	__le16	cmpl_ring;
@@ -1256,8 +1244,9 @@ struct hwrm_func_drv_rgtr_input {
 	__le16	target_id;
 	__le64	resp_addr;
 	__le32	flags;
-	#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE     0x1UL
-	#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE    0x2UL
+	#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE      0x1UL
+	#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE     0x2UL
+	#define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE    0x4UL
 	__le32	enables;
	#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE             0x1UL
	#define FUNC_DRV_RGTR_REQ_ENABLES_VER                 0x2UL
@@ -1277,14 +1266,18 @@ struct hwrm_func_drv_rgtr_input {
	#define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
	#define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI      0x8000UL
	#define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST     FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
-	u8	ver_maj;
-	u8	ver_min;
-	u8	ver_upd;
+	u8	ver_maj_8b;
+	u8	ver_min_8b;
+	u8	ver_upd_8b;
 	u8	unused_0[3];
 	__le32	timestamp;
 	u8	unused_1[4];
 	__le32	vf_req_fwd[8];
 	__le32	async_event_fwd[8];
+	__le16	ver_maj;
+	__le16	ver_min;
+	__le16	ver_upd;
+	__le16	ver_patch;
 };
 
 /* hwrm_func_drv_rgtr_output (size:128b/16B) */
@@ -1379,7 +1372,7 @@ struct hwrm_func_drv_qver_input {
 	u8	unused_0[2];
 };
 
-/* hwrm_func_drv_qver_output (size:128b/16B) */
+/* hwrm_func_drv_qver_output (size:192b/24B) */
 struct hwrm_func_drv_qver_output {
 	__le16	error_code;
 	__le16	req_type;
@@ -1398,11 +1391,15 @@ struct hwrm_func_drv_qver_output {
	#define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
	#define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI      0x8000UL
	#define FUNC_DRV_QVER_RESP_OS_TYPE_LAST     FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
-	u8	ver_maj;
-	u8	ver_min;
-	u8	ver_upd;
+	u8	ver_maj_8b;
+	u8	ver_min_8b;
+	u8	ver_upd_8b;
 	u8	unused_0[2];
 	u8	valid;
+	__le16	ver_maj;
+	__le16	ver_min;
+	__le16	ver_upd;
+	__le16	ver_patch;
 };
 
 /* hwrm_func_resource_qcaps_input (size:192b/24B) */
@@ -1416,7 +1413,7 @@ struct hwrm_func_resource_qcaps_input {
 	u8	unused_0[6];
 };
 
-/* hwrm_func_resource_qcaps_output (size:384b/48B) */
+/* hwrm_func_resource_qcaps_output (size:448b/56B) */
 struct hwrm_func_resource_qcaps_output {
 	__le16	error_code;
 	__le16	req_type;
@@ -1425,9 +1422,10 @@ struct hwrm_func_resource_qcaps_output {
 	__le16	max_vfs;
 	__le16	max_msix;
 	__le16	vf_reservation_strategy;
-	#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
-	#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
-	#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST   FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL
+	#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL        0x0UL
+	#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL        0x1UL
+	#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL
+	#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST          FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
 	__le16	min_rsscos_ctx;
 	__le16	max_rsscos_ctx;
 	__le16	min_cmpl_rings;
@@ -1444,7 +1442,8 @@ struct hwrm_func_resource_qcaps_output {
 	__le16	max_stat_ctx;
 	__le16	min_hw_ring_grps;
 	__le16	max_hw_ring_grps;
-	u8	unused_0;
+	__le16	max_tx_scheduler_inputs;
+	u8	unused_0[7];
 	u8	valid;
 };
 
@@ -1627,6 +1626,16 @@ struct hwrm_port_phy_cfg_output {
 	u8	valid;
 };
 
+/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
+struct hwrm_port_phy_cfg_cmd_err {
+	u8	code;
+	#define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN       0x0UL
+	#define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL
+	#define PORT_PHY_CFG_CMD_ERR_CODE_RETRY         0x2UL
+	#define PORT_PHY_CFG_CMD_ERR_CODE_LAST         PORT_PHY_CFG_CMD_ERR_CODE_RETRY
+	u8	unused_0[7];
+};
+
 /* hwrm_port_phy_qcfg_input (size:192b/24B) */
 struct hwrm_port_phy_qcfg_input {
 	__le16	req_type;
@@ -2030,6 +2039,33 @@ struct hwrm_port_qstats_output {
 	u8	valid;
 };
 
+/* hwrm_port_qstats_ext_input (size:320b/40B) */
+struct hwrm_port_qstats_ext_input {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	__le16	tx_stat_size;
+	__le16	rx_stat_size;
+	u8	unused_0[2];
+	__le64	tx_stat_host_addr;
+	__le64	rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_ext_output (size:128b/16B) */
+struct hwrm_port_qstats_ext_output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	tx_stat_size;
+	__le16	rx_stat_size;
+	u8	unused_0[3];
+	u8	valid;
+};
+
 /* hwrm_port_lpbk_qstats_input (size:128b/16B) */
 struct hwrm_port_lpbk_qstats_input {
 	__le16	req_type;
@@ -2552,7 +2588,11 @@ struct hwrm_queue_qportcfg_input {
	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX   0x1UL
	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
 	__le16	port_id;
-	u8	unused_0[2];
+	u8	drv_qmap_cap;
+	#define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL
+	#define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED  0x1UL
+	#define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST    QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED
+	u8	unused_0;
 };
 
 /* hwrm_queue_qportcfg_output (size:256b/32B) */
@@ -2571,52 +2611,68 @@ struct hwrm_queue_qportcfg_output {
 	u8	queue_cos2bw_cfg_allowed;
 	u8	queue_id0;
 	u8	queue_id0_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY    0x0UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN  0xffUL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY          0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE  0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC   0x3UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN        0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST           QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
 	u8	queue_id1;
 	u8	queue_id1_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY    0x0UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN  0xffUL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY          0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE  0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC   0x3UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN        0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST           QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
 	u8	queue_id2;
 	u8	queue_id2_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY    0x0UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN  0xffUL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY          0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE  0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC   0x3UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN        0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST           QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
 	u8	queue_id3;
 	u8	queue_id3_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY    0x0UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN  0xffUL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY          0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE  0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC   0x3UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN        0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST           QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
 	u8	queue_id4;
 	u8	queue_id4_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY    0x0UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN  0xffUL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY          0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE  0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC   0x3UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN        0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST           QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
 	u8	queue_id5;
 	u8	queue_id5_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY    0x0UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN  0xffUL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY          0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE  0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC   0x3UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN        0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST           QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
 	u8	queue_id6;
 	u8	queue_id6_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY    0x0UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN  0xffUL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY          0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE  0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC   0x3UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN        0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST           QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
 	u8	queue_id7;
 	u8	queue_id7_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY    0x0UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN  0xffUL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY          0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE  0x1UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC   0x3UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN        0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST           QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
 	u8	valid;
 };
 
@@ -5180,6 +5236,29 @@ struct hwrm_stat_ctx_clr_stats_output {
 	u8	valid;
 };
 
+/* hwrm_pcie_qstats_input (size:256b/32B) */
+struct hwrm_pcie_qstats_input {
+ __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 pcie_stat_size; + u8 unused_0[6]; + __le64 pcie_stat_host_addr; +}; + +/* hwrm_pcie_qstats_output (size:128b/16B) */ +struct hwrm_pcie_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 pcie_stat_size; + u8 unused_0[5]; + u8 valid; +}; + /* tx_port_stats (size:3264b/408B) */ struct tx_port_stats { __le64 tx_64b_frames; @@ -5305,6 +5384,30 @@ struct rx_port_stats { __le64 rx_stat_err; }; +/* rx_port_stats_ext (size:320b/40B) */ +struct rx_port_stats_ext { + __le64 link_down_events; + __le64 continuous_pause_events; + __le64 resume_pause_events; + __le64 continuous_roce_pause_events; + __le64 resume_roce_pause_events; +}; + +/* pcie_ctx_hw_stats (size:768b/96B) */ +struct pcie_ctx_hw_stats { + __le64 pcie_pl_signal_integrity; + __le64 pcie_dl_signal_integrity; + __le64 pcie_tl_signal_integrity; + __le64 pcie_link_integrity; + __le64 pcie_tx_traffic_rate; + __le64 pcie_rx_traffic_rate; + __le64 pcie_tx_dllp_statistics; + __le64 pcie_rx_dllp_statistics; + __le64 pcie_equalization_time; + __le32 pcie_ltssm_histogram[4]; + __le64 pcie_recovery_histogram; +}; + /* hwrm_fw_reset_input (size:192b/24B) */ struct hwrm_fw_reset_input { __le16 req_type; @@ -5313,14 +5416,15 @@ struct hwrm_fw_reset_input { __le16 target_id; __le64 resp_addr; u8 embedded_proc_type; - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT u8 selfrst_status; #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL @@ -6253,8 +6357,7 @@ struct hwrm_selftest_exec_input { #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL - u8 pcie_lane_num; - u8 unused_0[6]; + u8 unused_0[7]; }; /* hwrm_selftest_exec_output (size:128b/16B) */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index d87faad901fe..f952963d594e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. 
* * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2017 Broadcom Limited + * Copyright (c) 2016-2018 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -121,6 +121,23 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) return rc; } +int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vf_info *vf; + + if (bnxt_vf_ndo_prep(bp, vf_id)) + return -EINVAL; + + vf = &bp->pf.vf[vf_id]; + if (trusted) + vf->flags |= BNXT_VF_TRUST; + else + vf->flags &= ~BNXT_VF_TRUST; + + return 0; +} + int bnxt_get_vf_config(struct net_device *dev, int vf_id, struct ifla_vf_info *ivi) { @@ -147,6 +164,7 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id, else ivi->qos = 0; ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK); + ivi->trusted = !!(vf->flags & BNXT_VF_TRUST); if (!(vf->flags & BNXT_VF_LINK_FORCED)) ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; else if (vf->flags & BNXT_VF_LINK_UP) @@ -492,18 +510,16 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) } mutex_unlock(&bp->hwrm_cmd_lock); if (pf->active_vfs) { - u16 n = 1; + u16 n = pf->active_vfs; - if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL) - n = pf->active_vfs; - - hw_resc->max_tx_rings -= vf_tx_rings * n; - hw_resc->max_rx_rings -= vf_rx_rings * n; - hw_resc->max_hw_ring_grps -= vf_ring_grps * n; - hw_resc->max_cp_rings -= vf_cp_rings * n; + hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n; + hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n; + hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) * + n; + hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n; hw_resc->max_rsscos_ctxs -= pf->active_vfs; - hw_resc->max_stat_ctxs -= vf_stat_ctx * n; - hw_resc->max_vnics -= vf_vnics * n; + hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n; + hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n; rc = pf->active_vfs; } @@ -886,18 +902,19 @@ exec_fwd_resp_exit: return rc; } -static int bnxt_vf_store_mac(struct bnxt *bp, struct bnxt_vf_info *vf) +static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf) { u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input); struct hwrm_func_vf_cfg_input *req = (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr; - /* Only allow VF to set a valid MAC address if the PF assigned MAC - * address is zero + /* Allow VF to set a valid MAC address, if trust is set to on or + * if the PF assigned MAC address is zero */ if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) { if (is_valid_ether_addr(req->dflt_mac_addr) && - !is_valid_ether_addr(vf->mac_addr)) { + ((vf->flags & BNXT_VF_TRUST) || + (!is_valid_ether_addr(vf->mac_addr)))) { ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr); return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); } @@ -913,11 +930,17 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr; bool mac_ok = false; - /* VF MAC address must first match PF MAC address, if it is valid. + if (!is_valid_ether_addr((const u8 *)req->l2_addr)) + return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); + + /* Allow VF to set a valid MAC address, if trust is set to on. + * Or VF MAC address must first match MAC address in PF's context. 
* Otherwise, it must match the VF MAC address if firmware spec >= * 1.2.2 */ - if (is_valid_ether_addr(vf->mac_addr)) { + if (vf->flags & BNXT_VF_TRUST) { + mac_ok = true; + } else if (is_valid_ether_addr(vf->mac_addr)) { if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr)) mac_ok = true; } else if (is_valid_ether_addr(vf->vf_mac_addr)) { @@ -951,7 +974,9 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp, sizeof(phy_qcfg_resp)); mutex_unlock(&bp->hwrm_cmd_lock); + phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp)); phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id; + phy_qcfg_resp.valid = 1; if (vf->flags & BNXT_VF_LINK_UP) { /* if physical link is down, force link up on VF */ @@ -993,7 +1018,7 @@ static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf) switch (req_type) { case HWRM_FUNC_VF_CFG: - rc = bnxt_vf_store_mac(bp, vf); + rc = bnxt_vf_configure_mac(bp, vf); break; case HWRM_CFA_L2_FILTER_ALLOC: rc = bnxt_vf_validate_set_mac(bp, vf); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index dbc8d977fc5a..d10f6f6c7860 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2017 Broadcom Limited + * Copyright (c) 2016-2018 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -17,6 +17,7 @@ int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16); int bnxt_set_vf_bw(struct net_device *, int, int, int); int bnxt_set_vf_link_state(struct net_device *, int, int); int bnxt_set_vf_spoofchk(struct net_device *, int, bool); +int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust); int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs); void bnxt_sriov_disable(struct bnxt *); void bnxt_hwrm_exec_fwd_req(struct bnxt *); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 997e10e8b863..347e4f946eb2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 997e10e8b863..347e4f946eb2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -1,6 +1,6 @@
 /* Broadcom NetXtreme-C/E network driver.
 *
- * Copyright (c) 2016 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -101,13 +101,28 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
 	return 0;
 }
 
+static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
+{
+	struct bnxt_en_dev *edev = bp->edev;
+	int num_msix, idx, i;
+
+	num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
+	idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
+	for (i = 0; i < num_msix; i++) {
+		ent[i].vector = bp->irq_tbl[idx + i].vector;
+		ent[i].ring_idx = idx + i;
+		ent[i].db_offset = (idx + i) * 0x80;
+	}
+}
+
 static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
 			      struct bnxt_msix_entry *ent, int num_msix)
 {
 	struct net_device *dev = edev->net;
 	struct bnxt *bp = netdev_priv(dev);
 	int max_idx, max_cp_rings;
-	int avail_msix, i, idx;
+	int avail_msix, idx;
+	int rc = 0;
 
 	ASSERT_RTNL();
 	if (ulp_id != BNXT_ROCE_ULP)
@@ -116,23 +131,47 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
 	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
 		return -ENODEV;
 
+	if (edev->ulp_tbl[ulp_id].msix_requested)
+		return -EAGAIN;
+
 	max_cp_rings = bnxt_get_max_func_cp_rings(bp);
-	max_idx = min_t(int, bp->total_irqs, max_cp_rings);
-	avail_msix = max_idx - bp->cp_nr_rings;
+	avail_msix = bnxt_get_avail_msix(bp, num_msix);
 	if (!avail_msix)
 		return -ENOMEM;
 	if (avail_msix > num_msix)
 		avail_msix = num_msix;
 
-	idx = max_idx - avail_msix;
-	for (i = 0; i < avail_msix; i++) {
-		ent[i].vector = bp->irq_tbl[idx + i].vector;
-		ent[i].ring_idx = idx + i;
-		ent[i].db_offset = (idx + i) * 0x80;
+	if (bp->flags & BNXT_FLAG_NEW_RM) {
+		idx = bp->cp_nr_rings;
+	} else {
+		max_idx = min_t(int, bp->total_irqs, max_cp_rings);
+		idx = max_idx - avail_msix;
 	}
-	bnxt_set_max_func_irqs(bp, max_idx - avail_msix);
-	bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
+	edev->ulp_tbl[ulp_id].msix_base = idx;
 	edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
+	if (bp->total_irqs < (idx + avail_msix)) {
+		if (netif_running(dev)) {
+			bnxt_close_nic(bp, true, false);
+			rc = bnxt_open_nic(bp, true, false);
+		} else {
+			rc = bnxt_reserve_rings(bp);
+		}
+	}
+	if (rc) {
+		edev->ulp_tbl[ulp_id].msix_requested = 0;
+		return -EAGAIN;
+	}
+
+	if (bp->flags & BNXT_FLAG_NEW_RM) {
+		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+		avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
+		edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
+	}
+	bnxt_fill_msix_vecs(bp, ent);
+	bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix);
+	bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
+	edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
 	return avail_msix;
 }
 
@@ -146,11 +185,40 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
 	if (ulp_id != BNXT_ROCE_ULP)
 		return -EINVAL;
 
+	if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+		return 0;
+
 	max_cp_rings = bnxt_get_max_func_cp_rings(bp);
 	msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
 	bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
 	edev->ulp_tbl[ulp_id].msix_requested = 0;
-	bnxt_set_max_func_irqs(bp, bp->total_irqs);
+	bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested);
+	edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
+	if (netif_running(dev)) {
+		bnxt_close_nic(bp, true, false);
+		bnxt_open_nic(bp, true, false);
+	}
+	return 0;
+}
+
+int bnxt_get_ulp_msix_num(struct bnxt *bp)
+{
+	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+		struct bnxt_en_dev *edev = bp->edev;
+
+		return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
+	}
+	return 0;
+}
+
+int bnxt_get_ulp_msix_base(struct bnxt *bp)
+{
+	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+		struct bnxt_en_dev *edev = bp->edev;
+
+		if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
+			return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
+	}
 	return 0;
 }
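On success, bnxt_req_msix_vecs() records msix_base, reserves extra rings if needed (closing and reopening the NIC under RTNL), and fills the caller's array via bnxt_fill_msix_vecs(); it can hand back fewer vectors than requested, or -EAGAIN if vectors are already held or reservation fails. A sketch of the consuming side, assuming the conventional bnxt_en_ops entry point (the en_ops member and roce_setup_msix() are assumptions, not shown in this hunk):

/* ULP-side sketch; edev->en_ops->bnxt_req_msix_vecs is assumed from the
 * existing bnxt_en_ops interface and roce_setup_msix() is a made-up name.
 */
static int roce_setup_msix(struct bnxt_en_dev *edev, int num)
{
	struct bnxt_msix_entry ent[8];	/* assume num <= 8 for this sketch */
	int got, i;

	rtnl_lock();	/* bnxt_req_msix_vecs() runs under ASSERT_RTNL() */
	got = edev->en_ops->bnxt_req_msix_vecs(edev, BNXT_ROCE_ULP, ent, num);
	rtnl_unlock();
	if (got < 0)
		return got;	/* e.g. -EAGAIN while rings are being reserved */

	for (i = 0; i < got; i++)	/* got may be less than num */
		pr_info("vec %u maps ring %u, doorbell offset 0x%x\n",
			ent[i].vector, ent[i].ring_idx, ent[i].db_offset);
	return got;
}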
@@ -287,6 +355,58 @@ void bnxt_ulp_shutdown(struct bnxt *bp)
 	}
 }
 
+void bnxt_ulp_irq_stop(struct bnxt *bp)
+{
+	struct bnxt_en_dev *edev = bp->edev;
+	struct bnxt_ulp_ops *ops;
+
+	if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+		return;
+
+	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+		struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
+
+		if (!ulp->msix_requested)
+			return;
+
+		ops = rtnl_dereference(ulp->ulp_ops);
+		if (!ops || !ops->ulp_irq_stop)
+			return;
+		ops->ulp_irq_stop(ulp->handle);
+	}
+}
+
+void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
+{
+	struct bnxt_en_dev *edev = bp->edev;
+	struct bnxt_ulp_ops *ops;
+
+	if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+		return;
+
+	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+		struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
+		struct bnxt_msix_entry *ent = NULL;
+
+		if (!ulp->msix_requested)
+			return;
+
+		ops = rtnl_dereference(ulp->ulp_ops);
+		if (!ops || !ops->ulp_irq_restart)
+			return;
+
+		if (!err) {
+			ent = kcalloc(ulp->msix_requested, sizeof(*ent),
+				      GFP_KERNEL);
+			if (!ent)
+				return;
+			bnxt_fill_msix_vecs(bp, ent);
+		}
+		ops->ulp_irq_restart(ulp->handle, ent);
+		kfree(ent);
+	}
+}
+
 void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
 {
 	u16 event_id = le16_to_cpu(cmpl->event_id);
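bnxt_ulp_irq_stop() and bnxt_ulp_irq_restart() let the L2 driver quiesce the ULP while rings and MSI-X mappings are reshuffled. Note that the entry array handed to ulp_irq_restart() is kfree()d by the caller immediately afterwards, so the ULP must copy what it needs; a NULL array signals that recovery failed. A hedged sketch of the ULP half of this contract (all roce_* names are hypothetical):

/* Sketch of a ULP implementing the new callbacks; every name other than
 * the bnxt_ulp_ops fields is hypothetical.
 */
static void roce_irq_stop(void *handle)
{
	/* stop using completion vectors; bnxt_en may remap them */
	roce_disable_cq_irqs(handle);		/* hypothetical */
}

static void roce_irq_restart(void *handle, struct bnxt_msix_entry *ent)
{
	if (!ent)	/* NULL: the L2 driver failed to recover the rings */
		return;
	/* vectors/doorbells may have moved; copy them out of ent[] now,
	 * since the caller frees the array on return
	 */
	roce_remap_cq_irqs(handle, ent);	/* hypothetical */
}

static struct bnxt_ulp_ops roce_ulp_ops = {
	/* ... existing callbacks ... */
	.ulp_irq_stop		= roce_irq_stop,
	.ulp_irq_restart	= roce_irq_restart,
};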
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index d2471067dc37..df48ac71729f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -1,6 +1,6 @@
 /* Broadcom NetXtreme-C/E network driver.
 *
- * Copyright (c) 2016 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -20,6 +20,12 @@ struct hwrm_async_event_cmpl;
 
 struct bnxt;
 
+struct bnxt_msix_entry {
+	u32	vector;
+	u32	ring_idx;
+	u32	db_offset;
+};
+
 struct bnxt_ulp_ops {
 	/* async_notifier() cannot sleep (in BH context) */
 	void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
@@ -27,12 +33,8 @@ struct bnxt_ulp_ops {
 	void (*ulp_start)(void *);
 	void (*ulp_sriov_config)(void *, int);
 	void (*ulp_shutdown)(void *);
-};
-
-struct bnxt_msix_entry {
-	u32 vector;
-	u32 ring_idx;
-	u32 db_offset;
+	void (*ulp_irq_stop)(void *);
+	void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
 };
 
 struct bnxt_fw_msg {
@@ -49,6 +51,7 @@ struct bnxt_ulp {
 	unsigned long	*async_events_bmap;
 	u16		max_async_event_id;
 	u16		msix_requested;
+	u16		msix_base;
 	atomic_t	ref_count;
 };
 
@@ -60,6 +63,7 @@ struct bnxt_en_dev {
 	#define BNXT_EN_FLAG_ROCEV2_CAP		0x2
 	#define BNXT_EN_FLAG_ROCE_CAP		(BNXT_EN_FLAG_ROCEV1_CAP | \
 						 BNXT_EN_FLAG_ROCEV2_CAP)
+	#define BNXT_EN_FLAG_MSIX_REQUESTED	0x4
 	const struct bnxt_en_ops	*en_ops;
 	struct bnxt_ulp			ulp_tbl[BNXT_MAX_ULP];
 };
@@ -84,11 +88,15 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
 	return false;
 }
 
+int bnxt_get_ulp_msix_num(struct bnxt *bp);
+int bnxt_get_ulp_msix_base(struct bnxt *bp);
 void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
 void bnxt_ulp_stop(struct bnxt *bp);
 void bnxt_ulp_start(struct bnxt *bp);
 void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
 void bnxt_ulp_shutdown(struct bnxt *bp);
+void bnxt_ulp_irq_stop(struct bnxt *bp);
+void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
 void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
 
 struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
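Tying the header together: a ULP would discover the bnxt_en_dev, check the RoCE capability flags, and then pull vectors through en_ops. The sketch below assumes bnxt_ulp_probe() follows the ERR_PTR convention and glosses over the bnxt_register_device() step, neither of which is shown in this diff:

/* Hypothetical attach flow; only bnxt_ulp_probe(), BNXT_EN_FLAG_ROCE_CAP,
 * BNXT_ROCE_ULP and struct bnxt_msix_entry come from this patch's headers.
 */
static int roce_attach(struct net_device *netdev)
{
	struct bnxt_msix_entry ent[2];	/* ask for two vectors in this sketch */
	struct bnxt_en_dev *edev;
	int got;

	edev = bnxt_ulp_probe(netdev);
	if (IS_ERR_OR_NULL(edev))
		return -ENODEV;
	if (!(edev->flags & BNXT_EN_FLAG_ROCE_CAP))
		return -ENODEV;	/* firmware lacks RoCE support */

	/* a real ULP registers itself first so ulp_tbl[BNXT_ROCE_ULP] is
	 * populated (bnxt_register_device(), not part of this diff), then:
	 */
	rtnl_lock();
	got = edev->en_ops->bnxt_req_msix_vecs(edev, BNXT_ROCE_ULP, ent,
					       ARRAY_SIZE(ent));
	rtnl_unlock();
	return got < 0 ? got : 0;
}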