Diffstat (limited to 'drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c')
-rw-r--r-- | drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c | 482 |
1 file changed, 482 insertions, 0 deletions
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
index 5c178fade065..198f49b40dbf 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
@@ -6,6 +6,8 @@
 #include <linux/mm.h>
 #include <linux/pci.h>
 #include <linux/bnxt/hsi.h>
+#include <linux/if_vlan.h>
+#include <net/netdev_queues.h>
 
 #include "bnge.h"
 #include "bnge_hwrm.h"
@@ -701,3 +703,483 @@ qportcfg_exit:
 	bnge_hwrm_req_drop(bd, req);
 	return rc;
 }
+
+int bnge_hwrm_vnic_set_hds(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+	u16 hds_thresh = (u16)bn->netdev->cfg_pending->hds_thresh;
+	struct hwrm_vnic_plcmodes_cfg_input *req;
+	struct bnge_dev *bd = bn->bd;
+	int rc;
+
+	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_PLCMODES_CFG);
+	if (rc)
+		return rc;
+
+	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
+	req->enables = cpu_to_le32(BNGE_PLC_EN_JUMBO_THRES_VALID);
+	req->jumbo_thresh = cpu_to_le16(bn->rx_buf_use_size);
+
+	if (bnge_is_agg_reqd(bd)) {
+		req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+					  VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+		req->enables |=
+			cpu_to_le32(BNGE_PLC_EN_HDS_THRES_VALID);
+		req->hds_threshold = cpu_to_le16(hds_thresh);
+	}
+	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+	return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_vnic_ctx_alloc(struct bnge_dev *bd,
+			     struct bnge_vnic_info *vnic, u16 ctx_idx)
+{
+	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
+	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
+	int rc;
+
+	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
+	if (rc)
+		return rc;
+
+	resp = bnge_hwrm_req_hold(bd, req);
+	rc = bnge_hwrm_req_send(bd, req);
+	if (!rc)
+		vnic->fw_rss_cos_lb_ctx[ctx_idx] =
+			le16_to_cpu(resp->rss_cos_lb_ctx_id);
+	bnge_hwrm_req_drop(bd, req);
+
+	return rc;
+}
+
+static void
+__bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+			 struct hwrm_vnic_rss_cfg_input *req,
+			 struct bnge_vnic_info *vnic)
+{
+	struct bnge_dev *bd = bn->bd;
+
+	bnge_fill_hw_rss_tbl(bn, vnic);
+	req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
+
+	req->hash_type = cpu_to_le32(bd->rss_hash_cfg);
+	req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+	req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+	req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
+}
+
+int bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+			   struct bnge_vnic_info *vnic, bool set_rss)
+{
+	struct hwrm_vnic_rss_cfg_input *req;
+	struct bnge_dev *bd = bn->bd;
+	dma_addr_t ring_tbl_map;
+	u32 i, nr_ctxs;
+	int rc;
+
+	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_CFG);
+	if (rc)
+		return rc;
+
+	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+	if (!set_rss)
+		return bnge_hwrm_req_send(bd, req);
+
+	__bnge_hwrm_vnic_set_rss(bn, req, vnic);
+	ring_tbl_map = vnic->rss_table_dma_addr;
+	nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
+
+	bnge_hwrm_req_hold(bd, req);
+	for (i = 0; i < nr_ctxs; ring_tbl_map += BNGE_RSS_TABLE_SIZE, i++) {
+		req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
+		req->ring_table_pair_index = i;
+		req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
+		rc = bnge_hwrm_req_send(bd, req);
+		if (rc)
+			goto exit;
+	}
+
+exit:
+	bnge_hwrm_req_drop(bd, req);
+	return rc;
+}
+
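+/* Program the VNIC's datapath attributes: its default Rx and completion
+ * rings, an MRU sized for the MTU plus Ethernet and VLAN headers, and
+ * optional VLAN stripping.
+ */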
+int bnge_hwrm_vnic_cfg(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+	struct bnge_rx_ring_info *rxr = &bn->rx_ring[0];
+	struct hwrm_vnic_cfg_input *req;
+	struct bnge_dev *bd = bn->bd;
+	int rc;
+
+	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_CFG);
+	if (rc)
+		return rc;
+
+	req->default_rx_ring_id =
+		cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
+	req->default_cmpl_ring_id =
+		cpu_to_le16(bnge_cp_ring_for_rx(rxr));
+	req->enables =
+		cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
+			    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
+	vnic->mru = bd->netdev->mtu + ETH_HLEN + VLAN_HLEN;
+	req->mru = cpu_to_le16(vnic->mru);
+
+	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+	if (bd->flags & BNGE_EN_STRIP_VLAN)
+		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+	if (vnic->vnic_id == BNGE_VNIC_DEFAULT && bnge_aux_registered(bd))
+		req->flags |= cpu_to_le32(BNGE_VNIC_CFG_ROCE_DUAL_MODE);
+
+	return bnge_hwrm_req_send(bd, req);
+}
+
+void bnge_hwrm_update_rss_hash_cfg(struct bnge_net *bn)
+{
+	struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+	struct hwrm_vnic_rss_qcfg_output *resp;
+	struct hwrm_vnic_rss_qcfg_input *req;
+	struct bnge_dev *bd = bn->bd;
+
+	if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_QCFG))
+		return;
+
+	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+	/* all contexts configured to same hash_type, zero always exists */
+	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+	resp = bnge_hwrm_req_hold(bd, req);
+	if (!bnge_hwrm_req_send(bd, req))
+		bd->rss_hash_cfg =
+			le32_to_cpu(resp->hash_type) ?: bd->rss_hash_cfg;
+	bnge_hwrm_req_drop(bd, req);
+}
+
+int bnge_hwrm_l2_filter_free(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
+{
+	struct hwrm_cfa_l2_filter_free_input *req;
+	int rc;
+
+	rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_FREE);
+	if (rc)
+		return rc;
+
+	req->l2_filter_id = fltr->base.filter_id;
+	return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_l2_filter_alloc(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
+{
+	struct hwrm_cfa_l2_filter_alloc_output *resp;
+	struct hwrm_cfa_l2_filter_alloc_input *req;
+	int rc;
+
+	rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_ALLOC);
+	if (rc)
+		return rc;
+
+	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+
+	req->flags |= cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+	req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
+	req->enables =
+		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
+			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+	ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
+	eth_broadcast_addr(req->l2_addr_mask);
+
+	if (fltr->l2_key.vlan) {
+		req->enables |=
+			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
+				CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
+				CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
+		req->num_vlans = 1;
+		req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
+		req->l2_ivlan_mask = cpu_to_le16(0xfff);
+	}
+
+	resp = bnge_hwrm_req_hold(bd, req);
+	rc = bnge_hwrm_req_send(bd, req);
+	if (!rc)
+		fltr->base.filter_id = resp->l2_filter_id;
+
+	bnge_hwrm_req_drop(bd, req);
+	return rc;
+}
+
+int bnge_hwrm_cfa_l2_set_rx_mask(struct bnge_dev *bd,
+				 struct bnge_vnic_info *vnic)
+{
+	struct hwrm_cfa_l2_set_rx_mask_input *req;
+	int rc;
+
+	rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_SET_RX_MASK);
+	if (rc)
+		return rc;
+
+	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+	if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
+		req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+		req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+	}
+	req->mask = cpu_to_le32(vnic->rx_mask);
+	return bnge_hwrm_req_send_silent(bd, req);
+}
+
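+/* Allocate a VNIC in firmware.  Every RSS/COS context slot starts out
+ * invalid, and the default VNIC is flagged so firmware treats it as the
+ * function's default.
+ */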
+int bnge_hwrm_vnic_alloc(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
+			 unsigned int nr_rings)
+{
+	struct hwrm_vnic_alloc_output *resp;
+	struct hwrm_vnic_alloc_input *req;
+	unsigned int i;
+	int rc;
+
+	rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_ALLOC);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < BNGE_MAX_CTX_PER_VNIC; i++)
+		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
+	if (vnic->vnic_id == BNGE_VNIC_DEFAULT)
+		req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
+
+	resp = bnge_hwrm_req_hold(bd, req);
+	rc = bnge_hwrm_req_send(bd, req);
+	if (!rc)
+		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
+	bnge_hwrm_req_drop(bd, req);
+	return rc;
+}
+
+void bnge_hwrm_vnic_free_one(struct bnge_dev *bd, struct bnge_vnic_info *vnic)
+{
+	if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
+		struct hwrm_vnic_free_input *req;
+
+		if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_FREE))
+			return;
+
+		req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+		bnge_hwrm_req_send(bd, req);
+		vnic->fw_vnic_id = INVALID_HW_RING_ID;
+	}
+}
+
+void bnge_hwrm_vnic_ctx_free_one(struct bnge_dev *bd,
+				 struct bnge_vnic_info *vnic, u16 ctx_idx)
+{
+	struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
+
+	if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
+		return;
+
+	req->rss_cos_lb_ctx_id =
+		cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
+
+	bnge_hwrm_req_send(bd, req);
+	vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
+}
+
+void bnge_hwrm_stat_ctx_free(struct bnge_net *bn)
+{
+	struct hwrm_stat_ctx_free_input *req;
+	struct bnge_dev *bd = bn->bd;
+	int i;
+
+	if (bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_FREE))
+		return;
+
+	bnge_hwrm_req_hold(bd, req);
+	for (i = 0; i < bd->nq_nr_rings; i++) {
+		struct bnge_napi *bnapi = bn->bnapi[i];
+		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+		if (nqr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
+			req->stat_ctx_id = cpu_to_le32(nqr->hw_stats_ctx_id);
+			bnge_hwrm_req_send(bd, req);
+
+			nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+		}
+	}
+	bnge_hwrm_req_drop(bd, req);
+}
+
+int bnge_hwrm_stat_ctx_alloc(struct bnge_net *bn)
+{
+	struct hwrm_stat_ctx_alloc_output *resp;
+	struct hwrm_stat_ctx_alloc_input *req;
+	struct bnge_dev *bd = bn->bd;
+	int rc, i;
+
+	rc = bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_ALLOC);
+	if (rc)
+		return rc;
+
+	req->stats_dma_length = cpu_to_le16(bd->hw_ring_stats_size);
+	req->update_period_ms = cpu_to_le32(bn->stats_coal_ticks / 1000);
+
+	resp = bnge_hwrm_req_hold(bd, req);
+	for (i = 0; i < bd->nq_nr_rings; i++) {
+		struct bnge_napi *bnapi = bn->bnapi[i];
+		struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+		req->stats_dma_addr = cpu_to_le64(nqr->stats.hw_stats_map);
+
+		rc = bnge_hwrm_req_send(bd, req);
+		if (rc)
+			break;
+
+		nqr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
+		bn->grp_info[i].fw_stats_ctx = nqr->hw_stats_ctx_id;
+	}
+	bnge_hwrm_req_drop(bd, req);
+	return rc;
+}
+
+int hwrm_ring_free_send_msg(struct bnge_net *bn,
+			    struct bnge_ring_struct *ring,
+			    u32 ring_type, int cmpl_ring_id)
+{
+	struct hwrm_ring_free_input *req;
+	struct bnge_dev *bd = bn->bd;
+	int rc;
+
+	rc = bnge_hwrm_req_init(bd, req, HWRM_RING_FREE);
+	if (rc)
+		goto exit;
+
+	req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
+	req->ring_type = ring_type;
+	req->ring_id = cpu_to_le16(ring->fw_ring_id);
+
+	bnge_hwrm_req_hold(bd, req);
+	rc = bnge_hwrm_req_send(bd, req);
+	bnge_hwrm_req_drop(bd, req);
+exit:
+	if (rc) {
+		netdev_err(bd->netdev, "hwrm_ring_free type %d failed. rc:%d\n",
+			   ring_type, rc);
+		return -EIO;
+	}
+	return 0;
+}
+
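+/* Allocate one firmware ring.  map_index associates the ring with its
+ * doorbell and MSI-X slot; ring_type selects the TX/RX/AGG/CMPL/NQ
+ * programming below.
+ */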
rc:%d\n", ring_type, rc); + return -EIO; + } + return 0; +} + +int hwrm_ring_alloc_send_msg(struct bnge_net *bn, + struct bnge_ring_struct *ring, + u32 ring_type, u32 map_index) +{ + struct bnge_ring_mem_info *rmem = &ring->ring_mem; + struct bnge_ring_grp_info *grp_info; + struct hwrm_ring_alloc_output *resp; + struct hwrm_ring_alloc_input *req; + struct bnge_dev *bd = bn->bd; + u16 ring_id, flags = 0; + int rc; + + rc = bnge_hwrm_req_init(bd, req, HWRM_RING_ALLOC); + if (rc) + goto exit; + + req->enables = 0; + if (rmem->nr_pages > 1) { + req->page_tbl_addr = cpu_to_le64(rmem->dma_pg_tbl); + /* Page size is in log2 units */ + req->page_size = BNGE_PAGE_SHIFT; + req->page_tbl_depth = 1; + } else { + req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); + } + req->fbo = 0; + /* Association of ring index with doorbell index and MSIX number */ + req->logical_id = cpu_to_le16(map_index); + + switch (ring_type) { + case HWRM_RING_ALLOC_TX: { + struct bnge_tx_ring_info *txr; + + txr = container_of(ring, struct bnge_tx_ring_info, + tx_ring_struct); + req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; + /* Association of transmit ring with completion ring */ + grp_info = &bn->grp_info[ring->grp_idx]; + req->cmpl_ring_id = cpu_to_le16(bnge_cp_ring_for_tx(txr)); + req->length = cpu_to_le32(bn->tx_ring_mask + 1); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->queue_id = cpu_to_le16(ring->queue_id); + req->flags = cpu_to_le16(flags); + break; + } + case HWRM_RING_ALLOC_RX: + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req->length = cpu_to_le32(bn->rx_ring_mask + 1); + + /* Association of rx ring with stats context */ + grp_info = &bn->grp_info[ring->grp_idx]; + req->rx_buf_size = cpu_to_le16(bn->rx_buf_use_size); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->enables |= + cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); + if (NET_IP_ALIGN == 2) + flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; + req->flags = cpu_to_le16(flags); + break; + case HWRM_RING_ALLOC_AGG: + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; + /* Association of agg ring with rx ring */ + grp_info = &bn->grp_info[ring->grp_idx]; + req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); + req->rx_buf_size = cpu_to_le16(BNGE_RX_PAGE_SIZE); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->enables |= + cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | + RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); + req->length = cpu_to_le32(bn->rx_agg_ring_mask + 1); + break; + case HWRM_RING_ALLOC_CMPL: + req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; + req->length = cpu_to_le32(bn->cp_ring_mask + 1); + /* Association of cp ring with nq */ + grp_info = &bn->grp_info[map_index]; + req->nq_ring_id = cpu_to_le16(grp_info->nq_fw_ring_id); + req->cq_handle = cpu_to_le64(ring->handle); + req->enables |= + cpu_to_le32(RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); + break; + case HWRM_RING_ALLOC_NQ: + req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; + req->length = cpu_to_le32(bn->cp_ring_mask + 1); + req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + break; + default: + netdev_err(bn->netdev, "hwrm alloc invalid ring type %d\n", ring_type); + return -EINVAL; + } + + resp = bnge_hwrm_req_hold(bd, req); + rc = bnge_hwrm_req_send(bd, req); + ring_id = le16_to_cpu(resp->ring_id); + bnge_hwrm_req_drop(bd, req); + +exit: + if (rc) { + netdev_err(bd->netdev, "hwrm_ring_alloc type %d failed. 
rc:%d\n", ring_type, rc); + return -EIO; + } + ring->fw_ring_id = ring_id; + return rc; +} + +int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx) +{ + struct hwrm_func_cfg_input *req; + int rc; + + rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); + req->async_event_cr = cpu_to_le16(idx); + return bnge_hwrm_req_send(bd, req); +} |