Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af')
17 files changed, 621 insertions, 137 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index c8724bfa86b0..724df6398bbe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -64,6 +64,7 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
 static const struct pci_device_id cgx_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
 	{ 0, }  /* end of table */
 };
 
@@ -73,12 +74,13 @@ static bool is_dev_rpm(void *cgxd)
 {
 	struct cgx *cgx = cgxd;
 
-	return (cgx->pdev->device == PCI_DEVID_CN10K_RPM);
+	return (cgx->pdev->device == PCI_DEVID_CN10K_RPM) ||
+	       (cgx->pdev->device == PCI_DEVID_CN10KB_RPM);
 }
 
 bool is_lmac_valid(struct cgx *cgx, int lmac_id)
 {
-	if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX)
+	if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
 		return false;
 	return test_bit(lmac_id, &cgx->lmac_bmap);
 }
@@ -90,7 +92,7 @@ static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
 {
 	int tmp, id = 0;
 
-	for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+	for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
 		if (tmp == lmac_id)
 			break;
 		id++;
@@ -121,7 +123,7 @@ u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
 
 struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
 {
-	if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
+	if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
 		return NULL;
 
 	return cgx->lmac_idmap[lmac_id];
@@ -485,7 +487,7 @@ int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
 	if (!is_lmac_valid(cgx, lmac_id))
 		return -ENODEV;
 
-	cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
+	cgx_write(cgx, lmac_id, cgx->mac_ops->rxid_map_offset, (pkind & 0x3F));
 	return 0;
 }
 
@@ -740,6 +742,10 @@ int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
 	if (!cgx || lmac_id >= cgx->lmac_count)
 		return -ENODEV;
+
+	if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
+		return 0;
+
 	fec_stats_count = cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
 	if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
@@ -768,9 +774,9 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
 
 	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
 	if (enable)
-		cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+		cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
 	else
-		cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+		cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
 	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
 	return 0;
 }
@@ -1224,7 +1230,7 @@ static inline void link_status_user_format(u64 lstat,
 	linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
 	linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
 	linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
-	linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
+	linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat);
 	lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
 	strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
 }
@@ -1395,7 +1401,7 @@ int cgx_get_fwdata_base(u64 *base)
 	if (!cgx)
 		return -ENXIO;
 
-	first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
+	first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
 	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
 	err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
 	if (!err)
@@ -1484,7 +1490,7 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
 
 static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
 {
-	int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
+	int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
 	u64 req = 0;
 
 	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
@@ -1522,7 +1528,7 @@ static void cgx_lmac_linkup_work(struct work_struct *work)
 	int i, err;
 
 	/* Do Link up for all the enabled lmacs */
-	for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+	for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
 		err = cgx_fwi_link_change(cgx, i, true);
 		if (err)
 			dev_info(dev, "cgx port %d:%d Link up command failed\n",
@@ -1542,14 +1548,6 @@ int cgx_lmac_linkup_start(void *cgxd)
 	return 0;
 }
 
-static void cgx_lmac_get_fifolen(struct cgx *cgx)
-{
-	u64 cfg;
-
-	cfg = cgx_read(cgx, 0, CGX_CONST);
-	cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
-}
-
 static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
 				   int cnt, bool req_free)
 {
@@ -1604,17 +1602,20 @@ static int cgx_lmac_init(struct cgx *cgx)
 	u64 lmac_list;
 	int i, err;
 
-	cgx_lmac_get_fifolen(cgx);
-
-	cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
 	/* lmac_list specifies which lmacs are enabled
 	 * when bit n is set to 1, LMAC[n] is enabled
 	 */
-	if (cgx->mac_ops->non_contiguous_serdes_lane)
-		lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
+	if (cgx->mac_ops->non_contiguous_serdes_lane) {
+		if (is_dev_rpm2(cgx))
+			lmac_list =
+				cgx_read(cgx, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL;
+		else
+			lmac_list =
+				cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
+	}
 
-	if (cgx->lmac_count > MAX_LMAC_PER_CGX)
-		cgx->lmac_count = MAX_LMAC_PER_CGX;
+	if (cgx->lmac_count > cgx->max_lmac_per_mac)
+		cgx->lmac_count = cgx->max_lmac_per_mac;
 
 	for (i = 0; i < cgx->lmac_count; i++) {
 		lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
@@ -1635,7 +1636,9 @@ static int cgx_lmac_init(struct cgx *cgx)
 		lmac->cgx = cgx;
 		lmac->mac_to_index_bmap.max =
-				MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
+				cgx->mac_ops->dmac_filter_count /
+				cgx->lmac_count;
+
 		err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
 		if (err)
 			goto err_name_free;
@@ -1692,7 +1695,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
 	}
 
 	/* Free all lmac related resources */
-	for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+	for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
 		lmac = cgx->lmac_idmap[i];
 		if (!lmac)
 			continue;
@@ -1708,6 +1711,12 @@ static int cgx_lmac_exit(struct cgx *cgx)
 
 static void cgx_populate_features(struct cgx *cgx)
 {
+	u64 cfg;
+
+	cfg = cgx_read(cgx, 0, CGX_CONST);
+	cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
+	cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
+
 	if (is_dev_rpm(cgx))
 		cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
 				    RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
@@ -1716,6 +1725,15 @@ static void cgx_populate_features(struct cgx *cgx)
 				    RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
 }
 
+static u8 cgx_get_rxid_mapoffset(struct cgx *cgx)
+{
+	if (cgx->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10KB_RPM ||
+	    is_dev_rpm2(cgx))
+		return 0x80;
+	else
+		return 0x60;
+}
+
 static struct mac_ops	cgx_mac_ops = {
 	.name		= "cgx",
 	.csr_offset	= 0,
@@ -1728,12 +1746,14 @@ static struct mac_ops cgx_mac_ops = {
 	.non_contiguous_serdes_lane = false,
 	.rx_stats_cnt   = 9,
 	.tx_stats_cnt   = 18,
+	.dmac_filter_count = 32,
 	.get_nr_lmacs	= cgx_get_nr_lmacs,
 	.get_lmac_type  = cgx_get_lmac_type,
 	.lmac_fifo_len	= cgx_get_lmac_fifo_len,
 	.mac_lmac_intl_lbk = cgx_lmac_internal_loopback,
 	.mac_get_rx_stats  = cgx_get_rx_stats,
 	.mac_get_tx_stats  = cgx_get_tx_stats,
+	.get_fec_stats	   = cgx_get_fec_stats,
 	.mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding,
 	.mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
 	.mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
@@ -1759,11 +1779,13 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	pci_set_drvdata(pdev, cgx);
 
 	/* Use mac_ops to get MAC specific features */
-	if (pdev->device == PCI_DEVID_CN10K_RPM)
-		cgx->mac_ops = rpm_get_mac_ops();
+	if (is_dev_rpm(cgx))
+		cgx->mac_ops = rpm_get_mac_ops(cgx);
 	else
 		cgx->mac_ops = &cgx_mac_ops;
 
+	cgx->mac_ops->rxid_map_offset = cgx_get_rxid_mapoffset(cgx);
+
 	err = pci_enable_device(pdev);
 	if (err) {
 		dev_err(dev, "Failed to enable PCI device\n");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 0b06788b8d80..5a20d93004c7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -18,11 +18,7 @@
 /* PCI BAR nos */
 #define PCI_CFG_REG_BAR_NUM		0
 
-#define CGX_ID_MASK			0x7
-#define MAX_LMAC_PER_CGX		4
-#define MAX_DMAC_ENTRIES_PER_CGX	32
-#define CGX_FIFO_LEN			65536 /* 64K for both Rx & Tx */
-#define CGX_OFFSET(x)			((x) * MAX_LMAC_PER_CGX)
+#define CGX_ID_MASK			0xF
 
 /* Registers */
 #define CGXX_CMRX_CFG			0x00
@@ -30,7 +26,6 @@
 #define CMR_P2X_SEL_SHIFT		59ULL
 #define CMR_P2X_SEL_NIX0		1ULL
 #define CMR_P2X_SEL_NIX1		2ULL
-#define CMR_EN				BIT_ULL(55)
 #define DATA_PKT_TX_EN			BIT_ULL(53)
 #define DATA_PKT_RX_EN			BIT_ULL(54)
 #define CGX_LMAC_TYPE_SHIFT		40
@@ -56,7 +51,8 @@
 #define CGXX_SCRATCH0_REG		0x1050
 #define CGXX_SCRATCH1_REG		0x1058
 #define CGX_CONST			0x2000
-#define CGX_CONST_RXFIFO_SIZE		GENMASK_ULL(23, 0)
+#define CGX_CONST_RXFIFO_SIZE		GENMASK_ULL(55, 32)
+#define CGX_CONST_MAX_LMACS		GENMASK_ULL(31, 24)
 #define CGXX_SPUX_CONTROL1		0x10000
 #define CGXX_SPUX_LNX_FEC_CORR_BLOCKS	0x10700
 #define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS	0x10800
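The cgx.c/cgx.h changes above stop hard-coding MAX_LMAC_PER_CGX and instead read both the RX FIFO size and the per-MAC LMAC count out of the CGX_CONST register at probe time. Below is a minimal userspace sketch of that decode, assuming the bit layout of the new CGX_CONST_RXFIFO_SIZE (bits 55:32) and CGX_CONST_MAX_LMACS (bits 31:24) masks; the sample register value is hypothetical.

#include <stdint.h>
#include <stdio.h>

#define CGX_CONST_RXFIFO_SIZE_SHIFT	32
#define CGX_CONST_RXFIFO_SIZE_MASK	0xFFFFFFULL	/* bits 55:32 */
#define CGX_CONST_MAX_LMACS_SHIFT	24
#define CGX_CONST_MAX_LMACS_MASK	0xFFULL		/* bits 31:24 */

int main(void)
{
	/* hypothetical CGX_CONST value: 256 KB FIFO, 8 LMACs per MAC */
	uint64_t cgx_const = (0x40000ULL << CGX_CONST_RXFIFO_SIZE_SHIFT) |
			     (8ULL << CGX_CONST_MAX_LMACS_SHIFT);

	uint32_t fifo_len = (cgx_const >> CGX_CONST_RXFIFO_SIZE_SHIFT) &
			    CGX_CONST_RXFIFO_SIZE_MASK;
	unsigned int max_lmacs = (cgx_const >> CGX_CONST_MAX_LMACS_SHIFT) &
				 CGX_CONST_MAX_LMACS_MASK;

	printf("fifo_len=0x%x max_lmac_per_mac=%u\n", fifo_len, max_lmacs);
	return 0;
}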
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index 52b6016789fa..39aaf0e4467d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -75,6 +75,11 @@ struct mac_ops {
 	/* RPM & CGX differs in number of Receive/transmit stats */
 	u8			rx_stats_cnt;
 	u8			tx_stats_cnt;
+	/* Unlike CN10K which shares same CSR offset with CGX
+	 * CNF10KB has different csr offset
+	 */
+	u64			rxid_map_offset;
+	u8			dmac_filter_count;
 	/* Incase of RPM get number of lmacs from RPMX_CMR_RX_LMACS[LMAC_EXIST]
 	 * number of setbits in lmac_exist tells number of lmacs
 	 */
@@ -121,6 +126,9 @@ struct mac_ops {
 	int			(*mac_get_pfc_frm_cfg)(void *cgxd, int lmac_id,
 						       u8 *tx_pause, u8 *rx_pause);
 
+	/* FEC stats */
+	int			(*get_fec_stats)(void *cgxd, int lmac_id,
+						 struct cgx_fec_stats_rsp *rsp);
 };
 
 struct cgx {
@@ -128,7 +136,10 @@ struct cgx {
 	struct pci_dev		*pdev;
 	u8			cgx_id;
 	u8			lmac_count;
-	struct lmac		*lmac_idmap[MAX_LMAC_PER_CGX];
+	/* number of LMACs per MAC could be 4 or 8 */
+	u8			max_lmac_per_mac;
+#define MAX_LMAC_COUNT		8
+	struct lmac		*lmac_idmap[MAX_LMAC_COUNT];
 	struct work_struct	cgx_cmd_work;
 	struct workqueue_struct *cgx_cmd_workq;
 	struct list_head	cgx_list;
@@ -150,6 +161,6 @@ struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx);
 int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac);
 int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id);
 bool is_lmac_valid(struct cgx *cgx, int lmac_id);
-struct mac_ops *rpm_get_mac_ops(void);
+struct mac_ops *rpm_get_mac_ops(struct cgx *cgx);
 
 #endif /* LMAC_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 8d5d5a0f68c4..d2584ebb7a70 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -245,6 +245,9 @@ M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats,			\
 M(NPC_GET_SECRET_KEY,	0x6013, npc_get_secret_key,			\
				npc_get_secret_key_req,			\
				npc_get_secret_key_rsp)			\
+M(NPC_GET_FIELD_STATUS,	0x6014, npc_get_field_status,			\
+				npc_get_field_status_req,		\
+				npc_get_field_status_rsp)		\
 /* NIX mbox IDs (range 0x8000 - 0xFFFF) */				\
 M(NIX_LF_ALLOC,		0x8000, nix_lf_alloc,				\
				nix_lf_alloc_req, nix_lf_alloc_rsp)	\
@@ -1437,6 +1440,10 @@ struct flow_msg {
 	u8 tc;
 	__be16 sport;
 	__be16 dport;
+	union {
+		u8 ip_flag;
+		u8 next_header;
+	};
 };
 
 struct npc_install_flow_req {
@@ -1541,6 +1548,17 @@ struct ptp_rsp {
 	u64 clk;
 };
 
+struct npc_get_field_status_req {
+	struct mbox_msghdr hdr;
+	u8 intf;
+	u8 field;
+};
+
+struct npc_get_field_status_rsp {
+	struct mbox_msghdr hdr;
+	u8 enable;
+};
+
 struct set_vf_perm {
 	struct mbox_msghdr hdr;
 	u16 vf;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
index c0bedf402da9..f68a6a0e3aa4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
@@ -1184,10 +1184,13 @@ static int mcs_register_interrupts(struct mcs *mcs)
 	mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
 	if (!mcs->tx_sa_active) {
 		ret = -ENOMEM;
-		goto exit;
+		goto free_irq;
 	}
 
 	return ret;
+
+free_irq:
+	free_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP), mcs);
 exit:
 	pci_free_irq_vectors(mcs->pdev);
 	mcs->num_vec = 0;
@@ -1589,6 +1592,7 @@ static void mcs_remove(struct pci_dev *pdev)
 
 	/* Set MCS to external bypass */
 	mcs_set_external_bypass(mcs, true);
+	free_irq(pci_irq_vector(pdev, MCS_INT_VEC_IP), mcs);
 	pci_free_irq_vectors(pdev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index fa8029a94068..eb25e458266c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -589,7 +589,7 @@ int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
 	u16 pcifunc = req->hdr.pcifunc;
 	struct mcs_rsrc_map *map;
 	struct mcs *mcs;
-	int rc;
+	int rc = 0;
 
 	if (req->mcs_id >= rvu->mcs_blk_cnt)
 		return MCS_AF_ERR_INVALID_MCSID;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index f187293e3e08..9beeead56d7b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -185,8 +185,10 @@ enum key_fields {
 	NPC_VLAN_ETYPE_STAG, /* 0x88A8 */
 	NPC_OUTER_VID,
 	NPC_TOS,
+	NPC_IPFRAG_IPV4,
 	NPC_SIP_IPV4,
 	NPC_DIP_IPV4,
+	NPC_IPFRAG_IPV6,
 	NPC_SIP_IPV6,
 	NPC_DIP_IPV6,
 	NPC_IPPROTO_TCP,
@@ -620,6 +622,7 @@ struct rvu_npc_mcam_rule {
 	bool vfvlan_cfg;
 	u16 chan;
 	u16 chan_mask;
+	u8 lxmb;
 };
 
 #endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index a70e1153fa04..de0d88dd10d6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -8,7 +8,7 @@
 #include "cgx.h"
 #include "lmac_common.h"
 
-static struct mac_ops	rpm_mac_ops   = {
+static struct mac_ops		rpm_mac_ops   = {
 	.name		= "rpm",
 	.csr_offset	= 0x4e00,
 	.lmac_offset    = 20,
@@ -20,12 +20,14 @@ static struct mac_ops rpm_mac_ops = {
 	.non_contiguous_serdes_lane = true,
 	.rx_stats_cnt   = 43,
 	.tx_stats_cnt   = 34,
+	.dmac_filter_count = 32,
 	.get_nr_lmacs	= rpm_get_nr_lmacs,
 	.get_lmac_type  = rpm_get_lmac_type,
 	.lmac_fifo_len	= rpm_get_lmac_fifo_len,
 	.mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
 	.mac_get_rx_stats  = rpm_get_rx_stats,
 	.mac_get_tx_stats  = rpm_get_tx_stats,
+	.get_fec_stats	   = rpm_get_fec_stats,
 	.mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding,
 	.mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
 	.mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
@@ -37,9 +39,50 @@ static struct mac_ops rpm_mac_ops = {
 	.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
 };
 
-struct mac_ops *rpm_get_mac_ops(void)
+static struct mac_ops		rpm2_mac_ops   = {
+	.name		= "rpm",
+	.csr_offset     = RPM2_CSR_OFFSET,
+	.lmac_offset    = 20,
+	.int_register	= RPM2_CMRX_SW_INT,
+	.int_set_reg	= RPM2_CMRX_SW_INT_ENA_W1S,
+	.irq_offset	= 1,
+	.int_ena_bit    = BIT_ULL(0),
+	.lmac_fwi	= RPM_LMAC_FWI,
+	.non_contiguous_serdes_lane = true,
+	.rx_stats_cnt   = 43,
+	.tx_stats_cnt   = 34,
+	.dmac_filter_count = 64,
+	.get_nr_lmacs	= rpm2_get_nr_lmacs,
+	.get_lmac_type  = rpm_get_lmac_type,
+	.lmac_fifo_len	= rpm2_get_lmac_fifo_len,
+	.mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
+	.mac_get_rx_stats  = rpm_get_rx_stats,
+	.mac_get_tx_stats  = rpm_get_tx_stats,
+	.get_fec_stats	   = rpm_get_fec_stats,
+	.mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding,
+	.mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
+	.mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
+	.mac_pause_frm_config = rpm_lmac_pause_frm_config,
+	.mac_enadis_ptp_config = rpm_lmac_ptp_config,
+	.mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
+	.mac_tx_enable = rpm_lmac_tx_enable,
+	.pfc_config = rpm_lmac_pfc_config,
+	.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
+};
+
+bool is_dev_rpm2(void *rpmd)
+{
+	rpm_t *rpm = rpmd;
+
+	return (rpm->pdev->device == PCI_DEVID_CN10KB_RPM);
+}
+
+struct mac_ops *rpm_get_mac_ops(rpm_t *rpm)
 {
-	return &rpm_mac_ops;
+	if (is_dev_rpm2(rpm))
+		return &rpm2_mac_ops;
+	else
+		return &rpm_mac_ops;
 }
 
 static void rpm_write(rpm_t *rpm, u64 lmac, u64 offset, u64 val)
@@ -52,6 +95,16 @@ static u64 rpm_read(rpm_t *rpm, u64 lmac, u64 offset)
 	return cgx_read(rpm, lmac, offset);
 }
 
+/* Read HW major version to determine RPM
+ * MAC type 100/USX
+ */
+static bool is_mac_rpmusx(void *rpmd)
+{
+	rpm_t *rpm = rpmd;
+
+	return rpm_read(rpm, 0, RPMX_CONST1) & 0x700ULL;
+}
+
 int rpm_get_nr_lmacs(void *rpmd)
 {
 	rpm_t *rpm = rpmd;
@@ -59,6 +112,13 @@ int rpm_get_nr_lmacs(void *rpmd)
 	return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL);
 }
 
+int rpm2_get_nr_lmacs(void *rpmd)
+{
+	rpm_t *rpm = rpmd;
+
+	return hweight8(rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL);
+}
+
 int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable)
 {
 	rpm_t *rpm = rpmd;
@@ -222,6 +282,46 @@ static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id,
 	}
 }
 
+static void rpm2_lmac_cfg_bp(rpm_t *rpm, int lmac_id, u8 tx_pause, u8 rx_pause)
+{
+	u64 cfg;
+
+	cfg = rpm_read(rpm, lmac_id, RPM2_CMR_RX_OVR_BP);
+	if (tx_pause) {
+		/* Configure CL0 Pause Quanta & threshold
+		 * for 802.3X frames
+		 */
+		rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
+		cfg &= ~RPM2_CMR_RX_OVR_BP_EN;
+	} else {
+		/* Disable all Pause Quanta & threshold values */
+		rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+		cfg |= RPM2_CMR_RX_OVR_BP_EN;
+		cfg &= ~RPM2_CMR_RX_OVR_BP_BP;
+	}
+	rpm_write(rpm, lmac_id, RPM2_CMR_RX_OVR_BP, cfg);
+}
+
+static void rpm_lmac_cfg_bp(rpm_t *rpm, int lmac_id, u8 tx_pause, u8 rx_pause)
+{
+	u64 cfg;
+
+	cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
+	if (tx_pause) {
+		/* Configure CL0 Pause Quanta & threshold for
+		 * 802.3X frames
+		 */
+		rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
+		cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+	} else {
+		/* Disable all Pause Quanta & threshold values */
+		rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+		cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+		cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
+	}
+	rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg);
+}
+
 int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
			      u8 rx_pause)
 {
@@ -243,18 +343,11 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
 	cfg |= tx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
 	rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
 
-	cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
-	if (tx_pause) {
-		/* Configure CL0 Pause Quanta & threshold for 802.3X frames */
-		rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
-		cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
-	} else {
-		/* Disable all Pause Quanta & threshold values */
-		rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
-		cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
-		cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
-	}
-	rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg);
+	if (is_dev_rpm2(rpm))
+		rpm2_lmac_cfg_bp(rpm, lmac_id, tx_pause, rx_pause);
+	else
+		rpm_lmac_cfg_bp(rpm, lmac_id, tx_pause, rx_pause);
+
 	return 0;
 }
 
@@ -278,13 +371,16 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
 	cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
 	rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
 
+	/* Enable channel mask for all LMACS */
+	if (is_dev_rpm2(rpm))
+		rpm_write(rpm, lmac_id, RPM2_CMR_CHAN_MSK_OR, 0xffff);
+	else
+		rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
+
 	/* Disable all PFC classes */
 	cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
 	cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
 	rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
-
-	/* Enable channel mask for all LMACS */
-	rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
 }
 
 int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
@@ -292,7 +388,7 @@ int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
 	rpm_t *rpm = rpmd;
 	u64 val_lo, val_hi;
 
-	if (!rpm || lmac_id >= rpm->lmac_count)
+	if (!is_lmac_valid(rpm, lmac_id))
 		return -ENODEV;
 
 	mutex_lock(&rpm->lock);
@@ -320,7 +416,7 @@ int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat)
 	rpm_t *rpm = rpmd;
 	u64 val_lo, val_hi;
 
-	if (!rpm || lmac_id >= rpm->lmac_count)
+	if (!is_lmac_valid(rpm, lmac_id))
 		return -ENODEV;
 
 	mutex_lock(&rpm->lock);
@@ -380,13 +476,71 @@ u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id)
 	return 0;
 }
 
+static int rpmusx_lmac_internal_loopback(rpm_t *rpm, int lmac_id, bool enable)
+{
+	u64 cfg;
+
+	cfg = rpm_read(rpm, lmac_id, RPM2_USX_PCSX_CONTROL1);
+
+	if (enable)
+		cfg |= RPM2_USX_PCS_LBK;
+	else
+		cfg &= ~RPM2_USX_PCS_LBK;
+	rpm_write(rpm, lmac_id, RPM2_USX_PCSX_CONTROL1, cfg);
+
+	return 0;
+}
+
+u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
+{
+	u64 hi_perf_lmac, lmac_info;
+	rpm_t *rpm = rpmd;
+	u8 num_lmacs;
+	u32 fifo_len;
+
+	lmac_info = rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS);
+	/* LMACs are divided into two groups and each group
+	 * gets half of the FIFO
+	 * Group0 lmac_id range {0..3}
+	 * Group1 lmac_id range {4..7}
+	 */
+	fifo_len = rpm->mac_ops->fifo_len / 2;
+
+	if (lmac_id < 4) {
+		num_lmacs = hweight8(lmac_info & 0xF);
+		hi_perf_lmac = (lmac_info >> 8) & 0x3ULL;
+	} else {
+		num_lmacs = hweight8(lmac_info & 0xF0);
+		hi_perf_lmac = (lmac_info >> 10) & 0x3ULL;
+		hi_perf_lmac += 4;
+	}
+
+	switch (num_lmacs) {
+	case 1:
+		return fifo_len;
+	case 2:
+		return fifo_len / 2;
+	case 3:
+		/* LMAC marked as hi_perf gets half of the FIFO
+		 * and rest 1/4th
+		 */
+		if (lmac_id == hi_perf_lmac)
+			return fifo_len / 2;
+		return fifo_len / 4;
+	case 4:
+	default:
+		return fifo_len / 4;
+	}
+	return 0;
+}
+
 int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
 {
 	rpm_t *rpm = rpmd;
 	u8 lmac_type;
 	u64 cfg;
 
-	if (!rpm || lmac_id >= rpm->lmac_count)
+	if (!is_lmac_valid(rpm, lmac_id))
 		return -ENODEV;
 	lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
@@ -395,6 +549,9 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
 		return 0;
 	}
 
+	if (is_dev_rpm2(rpm) && is_mac_rpmusx(rpm))
+		return rpmusx_lmac_internal_loopback(rpm, lmac_id, enable);
+
 	cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
 
 	if (enable)
@@ -439,8 +596,8 @@ void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
 
 int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
 {
+	u64 cfg, class_en, pfc_class_mask_cfg;
 	rpm_t *rpm = rpmd;
-	u64 cfg, class_en;
 
 	if (!is_lmac_valid(rpm, lmac_id))
 		return -ENODEV;
@@ -476,7 +633,10 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
 
 	rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
 
-	rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, class_en);
+	pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
+						RPMX_CMRX_PRT_CBFC_CTL;
+
+	rpm_write(rpm, lmac_id, pfc_class_mask_cfg, class_en);
 
 	return 0;
 }
@@ -497,3 +657,59 @@ int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause, u8 *rx_pause)
 
 	return 0;
 }
+
+int rpm_get_fec_stats(void *rpmd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
+{
+	u64 val_lo, val_hi;
+	rpm_t *rpm = rpmd;
+	u64 cfg;
+
+	if (!is_lmac_valid(rpm, lmac_id))
+		return -ENODEV;
+
+	if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
+		return 0;
+
+	if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
+		val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_CCW_LO);
+		val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
+		rsp->fec_corr_blks = (val_hi << 16 | val_lo);
+
+		val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_NCCW_LO);
+		val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
+		rsp->fec_uncorr_blks = (val_hi << 16 | val_lo);
+
+		/* 50G uses 2 Physical serdes lines */
+		if (rpm->lmac_idmap[lmac_id]->link_info.lmac_type_id ==
+		    LMAC_MODE_50G_R) {
+			val_lo = rpm_read(rpm, lmac_id,
+					  RPMX_MTI_FCFECX_VL1_CCW_LO);
+			val_hi = rpm_read(rpm, lmac_id,
+					  RPMX_MTI_FCFECX_CW_HI);
+			rsp->fec_corr_blks += (val_hi << 16 | val_lo);
+
+			val_lo = rpm_read(rpm, lmac_id,
+					  RPMX_MTI_FCFECX_VL1_NCCW_LO);
+			val_hi = rpm_read(rpm, lmac_id,
+					  RPMX_MTI_FCFECX_CW_HI);
+			rsp->fec_uncorr_blks += (val_hi << 16 | val_lo);
+		}
+	} else {
+		/* enable RS-FEC capture */
+		cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL);
+		cfg |= RPMX_RSFEC_RX_CAPTURE | BIT(lmac_id);
+		rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg);
+
+		val_lo = rpm_read(rpm, 0,
+				  RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2);
+		val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+		rsp->fec_corr_blks = (val_hi << 32 | val_lo);
+
+		val_lo = rpm_read(rpm, 0,
+				  RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3);
+		val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+		rsp->fec_uncorr_blks = (val_hi << 32 | val_lo);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index 77f2ef9e1425..22147b4c2137 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -12,17 +12,19 @@
 
 /* PCI device IDs */
 #define PCI_DEVID_CN10K_RPM		0xA060
+#define PCI_SUBSYS_DEVID_CNF10KB_RPM	0xBC00
+#define PCI_DEVID_CN10KB_RPM		0xA09F
 
 /* Registers */
 #define RPMX_CMRX_CFG			0x00
 #define RPMX_RX_TS_PREPEND		BIT_ULL(22)
 #define RPMX_TX_PTP_1S_SUPPORT		BIT_ULL(17)
+#define RPMX_CMRX_RX_ID_MAP		0x80
 #define RPMX_CMRX_SW_INT		0x180
 #define RPMX_CMRX_SW_INT_W1S		0x188
 #define RPMX_CMRX_SW_INT_ENA_W1S	0x198
 #define RPMX_CMRX_LINK_CFG		0x1070
 #define RPMX_MTI_PCS100X_CONTROL1	0x20000
-#define RPMX_MTI_LPCSX_CONTROL1		0x30000
 #define RPMX_MTI_PCS_LBK		BIT_ULL(14)
 #define RPMX_MTI_LPCSX_CONTROL(id)	(0x30000 | ((id) * 0x100))
 
@@ -76,11 +78,40 @@
 #define RPMX_MTI_MAC100X_XIF_MODE		0x8100
 #define RPMX_ONESTEP_ENABLE			BIT_ULL(5)
 #define RPMX_TS_BINARY_MODE			BIT_ULL(11)
+#define RPMX_CONST1				0x2008
+
+/* FEC stats */
+#define RPMX_MTI_STAT_STATN_CONTROL		0x10018
+#define RPMX_MTI_STAT_DATA_HI_CDC		0x10038
+#define RPMX_RSFEC_RX_CAPTURE			BIT_ULL(27)
+#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2	0x40050
+#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3	0x40058
+#define RPMX_MTI_FCFECX_VL0_CCW_LO		0x38618
+#define RPMX_MTI_FCFECX_VL0_NCCW_LO		0x38620
+#define RPMX_MTI_FCFECX_VL1_CCW_LO		0x38628
+#define RPMX_MTI_FCFECX_VL1_NCCW_LO		0x38630
+#define RPMX_MTI_FCFECX_CW_HI			0x38638
+
+/* CN10KB CSR Declaration */
+#define RPM2_CMRX_SW_INT			0x1b0
+#define RPM2_CMRX_SW_INT_ENA_W1S		0x1b8
+#define RPM2_CMR_CHAN_MSK_OR			0x3120
+#define RPM2_CMR_RX_OVR_BP_EN			BIT_ULL(2)
+#define RPM2_CMR_RX_OVR_BP_BP			BIT_ULL(1)
+#define RPM2_CMR_RX_OVR_BP			0x3130
+#define RPM2_CSR_OFFSET				0x3e00
+#define RPM2_CMRX_PRT_CBFC_CTL			0x6510
+#define RPM2_CMRX_RX_LMACS			0x100
+#define RPM2_CMRX_RX_LOGL_XON			0x3100
+#define RPM2_CMRX_RX_STAT2			0x3010
+#define RPM2_USX_PCSX_CONTROL1			0x80000
+#define RPM2_USX_PCS_LBK			BIT_ULL(14)
 
 /* Function Declarations */
 int rpm_get_nr_lmacs(void *rpmd);
 u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
 u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id);
+u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id);
 int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable);
 void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable);
 int rpm_lmac_get_pause_frm_status(void *cgxd, int lmac_id, u8 *tx_pause,
@@ -97,4 +128,7 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause,
			u16 pfc_en);
 int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause,
			     u8 *rx_pause);
+int rpm2_get_nr_lmacs(void *rpmd);
+bool is_dev_rpm2(void *rpmd);
+int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
 #endif /* RPM_H */
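rpm2_get_lmac_fifo_len() above divides the RPM2 FIFO between two groups of four LMACs and gives the hi-perf LMAC a double share when three LMACs in a group are active. A standalone model of that carve-up, using a hypothetical lmac_info value and total FIFO size:

#include <stdint.h>
#include <stdio.h>

/* popcount of the low 8 bits, like the kernel's hweight8() */
static unsigned int hweight8(uint8_t v)
{
	unsigned int n = 0;

	while (v) {
		n += v & 1;
		v >>= 1;
	}
	return n;
}

/* Each 4-LMAC group owns half the FIFO; lmac_info layout follows the diff. */
static uint32_t rpm2_lmac_fifo_len(uint64_t lmac_info, int lmac_id, uint32_t total_fifo)
{
	uint32_t fifo_len = total_fifo / 2; /* per-group share */
	uint64_t hi_perf_lmac;
	unsigned int num_lmacs;

	if (lmac_id < 4) {
		num_lmacs = hweight8(lmac_info & 0xF);
		hi_perf_lmac = (lmac_info >> 8) & 0x3;
	} else {
		num_lmacs = hweight8(lmac_info & 0xF0);
		hi_perf_lmac = ((lmac_info >> 10) & 0x3) + 4;
	}

	switch (num_lmacs) {
	case 1:
		return fifo_len;
	case 2:
		return fifo_len / 2;
	case 3:
		/* hi-perf LMAC takes half the group share, the others 1/4 */
		return (lmac_id == (int)hi_perf_lmac) ? fifo_len / 2 : fifo_len / 4;
	default:
		return fifo_len / 4;
	}
}

int main(void)
{
	/* hypothetical: 3 LMACs active in group0 (bits 0-2), LMAC1 hi-perf */
	uint64_t lmac_info = 0x7 | (1ULL << 8);

	for (int id = 0; id < 3; id++)
		printf("lmac%d share=0x%x\n", id,
		       rpm2_lmac_fifo_len(lmac_info, id, 0x40000));
	return 0;
}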
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 76474385a602..7f0a64731c67 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -410,9 +410,15 @@ struct rvu_fwdata {
 	u32 ptp_ext_tstamp;
 #define FWDATA_RESERVED_MEM 1022
 	u64 reserved[FWDATA_RESERVED_MEM];
-#define CGX_MAX 5
+#define CGX_MAX 9
 #define CGX_LMACS_MAX 4
-	struct cgx_lmac_fwdata_s cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
+#define CGX_LMACS_USX 8
+	union {
+		struct cgx_lmac_fwdata_s
+			cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
+		struct cgx_lmac_fwdata_s
+			cgx_fw_data_usx[CGX_MAX][CGX_LMACS_USX];
+	};
 	/* Do not add new fields below this line */
 };
 
@@ -478,7 +484,7 @@ struct rvu {
 	u8 cgx_mapped_pfs;
 	u8 cgx_cnt_max;	 /* CGX port count max */
 	u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
-	u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
+	u64 *cgxlmac2pf_map; /* bitmap of mapped pfs for
			      * every cgx lmac port
			      */
 	unsigned long pf_notify_bmap; /* Flags for PF notification */
@@ -851,6 +857,7 @@ int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
			       u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
			       u64 bcast_mcast_val, u64 bcast_mcast_mask);
 void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx);
+bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf);
 
 /* CPT APIs */
 int rvu_cpt_register_interrupts(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index addc69f4b65c..438b212fb54a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -55,8 +55,9 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
 	return  (cgx_features_get(cgxd) & feature);
 }
 
+#define CGX_OFFSET(x)			((x) * rvu->hw->lmac_per_cgx)
 /* Returns bitmap of mapped PFs */
-static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
 {
 	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
 }
@@ -71,7 +72,8 @@ int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
 	if (!pfmap)
 		return -ENODEV;
 	else
-		return find_first_bit(&pfmap, 16);
+		return find_first_bit(&pfmap,
+				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
 }
 
 static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
@@ -129,14 +131,14 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 	if (!cgx_cnt_max)
 		return 0;
 
-	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
+	if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
 		return -EINVAL;
 
 	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
-	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+	size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
 	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
 	if (!rvu->pf2cgxlmac_map)
 		return -ENOMEM;
@@ -145,9 +147,10 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 	memset(rvu->pf2cgxlmac_map, 0xFF, size);
 
 	/* Reverse map table */
-	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
-				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
-				  GFP_KERNEL);
+	rvu->cgxlmac2pf_map =
+		devm_kzalloc(rvu->dev,
+			     cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
+			     GFP_KERNEL);
 	if (!rvu->cgxlmac2pf_map)
 		return -ENOMEM;
 
@@ -156,7 +159,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 		if (!rvu_cgx_pdata(cgx, rvu))
 			continue;
 		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
-		for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
+		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
 			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter);
 			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@@ -235,7 +238,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
 	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
 
 	do {
-		pfid = find_first_bit(&pfmap, 16);
+		pfid = find_first_bit(&pfmap,
+				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
		clear_bit(pfid, &pfmap);
 
		/* check if notification is enabled */
@@ -310,7 +314,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
-		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
+		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
@@ -396,7 +400,7 @@ int rvu_cgx_exit(struct rvu *rvu)
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
-		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
+		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}
 
@@ -468,6 +472,7 @@ void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
 {
	int pf = rvu_get_pf(pcifunc);
	int i = 0, lmac_count = 0;
+	struct mac_ops *mac_ops;
	u8 max_dmac_filters;
	u8 cgx_id, lmac_id;
	void *cgx_dev;
@@ -483,7 +488,12 @@ void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgx_dev = cgx_get_pdata(cgx_id);
	lmac_count = cgx_get_lmac_cnt(cgx_dev);
-	max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
+
+	mac_ops = get_mac_ops(cgx_dev);
+	if (!mac_ops)
+		return;
+
+	max_dmac_filters = mac_ops->dmac_filter_count / lmac_count;
 
	for (i = 0; i < max_dmac_filters; i++)
		cgx_lmac_addr_del(cgx_id, lmac_id, i);
@@ -569,6 +579,7 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
				   struct cgx_fec_stats_rsp *rsp)
 {
	int pf = rvu_get_pf(req->hdr.pcifunc);
+	struct mac_ops *mac_ops;
	u8 cgx_idx, lmac;
	void *cgxd;
 
@@ -577,7 +588,8 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
 
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
-	return cgx_get_fec_stats(cgxd, lmac, rsp);
+	mac_ops = get_mac_ops(cgxd);
+	return mac_ops->get_fec_stats(cgxd, lmac, rsp);
 }
 
 int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
@@ -1110,8 +1122,15 @@ int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
 
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
 
-	memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
-	       sizeof(struct cgx_lmac_fwdata_s));
+	if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
+		memcpy(&rsp->fwdata,
+		       &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id],
+		       sizeof(struct cgx_lmac_fwdata_s));
+	else
+		memcpy(&rsp->fwdata,
+		       &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
+		       sizeof(struct cgx_lmac_fwdata_s));
+
	return 0;
 }
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index f66dde2b0f92..fa280ebd3052 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -2613,7 +2613,7 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
						      rvu->rvu_dbg.cgx_root);
 
-		for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
+		for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			/* lmac debugfs dir */
			sprintf(dname, "lmac%d", lmac_id);
			rvu->rvu_dbg.lmac =
@@ -2759,6 +2759,12 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
		switch (bit) {
+		case NPC_LXMB:
+			if (rule->lxmb == 1)
+				seq_puts(s, "\tL2M nibble is set\n");
+			else
+				seq_puts(s, "\tL2B nibble is set\n");
+			break;
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
@@ -2796,6 +2802,14 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
+		case NPC_IPFRAG_IPV6:
+			seq_printf(s, "0x%x ", rule->packet.next_header);
+			seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
+			break;
+		case NPC_IPFRAG_IPV4:
+			seq_printf(s, "0x%x ", rule->packet.ip_flag);
+			seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
+			break;
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
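The rvu_cgx.c changes above widen the per-port PF bitmap from u16 to u64 so that find_first_bit() can cover cgx_cnt_max * lmac_per_cgx positions rather than a fixed 16. A small userspace sketch of the notification loop's bit walk, with hypothetical values (9 MACs x 8 LMACs):

#include <stdint.h>
#include <stdio.h>

/* Index of the lowest set bit, like find_first_bit() on a single word;
 * returns nbits when no bit is set.
 */
static unsigned int find_first_bit64(uint64_t map, unsigned int nbits)
{
	for (unsigned int i = 0; i < nbits && i < 64; i++)
		if (map & (1ULL << i))
			return i;
	return nbits;
}

int main(void)
{
	/* hypothetical: 9 MACs x 8 LMACs gives PF ids past 16, which is
	 * why the per-port PF bitmap grew from u16 to u64
	 */
	unsigned int nbits = 9 * 8;
	uint64_t pfmap = (1ULL << 3) | (1ULL << 20); /* PFs 3 and 20 mapped */

	while (pfmap) {
		unsigned int pfid = find_first_bit64(pfmap, nbits);

		pfmap &= ~(1ULL << pfid); /* clear_bit() */
		printf("notify pf %u\n", pfid);
	}
	return 0;
}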
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 88dee589cb21..e4407f09c9d3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1500,6 +1500,9 @@ static const struct devlink_param rvu_af_dl_params[] = {
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
			     rvu_af_dl_dwrr_mtu_validate),
+};
+
+static const struct devlink_param rvu_af_dl_param_exact_match[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
			     "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
@@ -1547,14 +1550,7 @@ static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
	return 0;
 }
 
-static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
-				struct netlink_ext_ack *extack)
-{
-	return devlink_info_driver_name_put(req, DRV_NAME);
-}
-
 static const struct devlink_ops rvu_devlink_ops = {
-	.info_get = rvu_devlink_info_get,
	.eswitch_mode_get = rvu_devlink_eswitch_mode_get,
	.eswitch_mode_set = rvu_devlink_eswitch_mode_set,
 };
@@ -1563,7 +1559,6 @@ int rvu_register_dl(struct rvu *rvu)
 {
	struct rvu_devlink *rvu_dl;
	struct devlink *dl;
-	size_t size;
	int err;
 
	dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
@@ -1585,21 +1580,32 @@ int rvu_register_dl(struct rvu *rvu)
		goto err_dl_health;
	}
 
+	err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+	if (err) {
+		dev_err(rvu->dev,
+			"devlink params register failed with error %d", err);
+		goto err_dl_health;
+	}
+
	/* Register exact match devlink only for CN10K-B */
-	size = ARRAY_SIZE(rvu_af_dl_params);
	if (!rvu_npc_exact_has_match_table(rvu))
-		size -= 1;
+		goto done;
 
-	err = devlink_params_register(dl, rvu_af_dl_params, size);
+	err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
+				      ARRAY_SIZE(rvu_af_dl_param_exact_match));
	if (err) {
		dev_err(rvu->dev,
-			"devlink params register failed with error %d", err);
-		goto err_dl_health;
+			"devlink exact match params register failed with error %d", err);
+		goto err_dl_exact_match;
	}
 
+done:
	devlink_register(dl);
	return 0;
 
+err_dl_exact_match:
+	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+
 err_dl_health:
	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
@@ -1612,8 +1618,14 @@ void rvu_unregister_dl(struct rvu *rvu)
	struct devlink *dl = rvu_dl->dl;
 
	devlink_unregister(dl);
-	devlink_params_unregister(dl, rvu_af_dl_params,
-				  ARRAY_SIZE(rvu_af_dl_params));
+
+	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+
+	/* Unregister exact match devlink only for CN10K-B */
+	if (rvu_npc_exact_has_match_table(rvu))
+		devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
+					  ARRAY_SIZE(rvu_af_dl_param_exact_match));
+
	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
 }
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a62c1b322012..6b8747ebc08c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -3197,8 +3197,12 @@ static void rvu_get_lbk_link_max_frs(struct rvu *rvu,  u16 *max_mtu)
 
 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
 {
-	/* RPM supports FIFO len 128 KB */
-	if (rvu_cgx_get_fifolen(rvu) == 0x20000)
+	int fifo_size = rvu_cgx_get_fifolen(rvu);
+
+	/* RPM supports FIFO len 128 KB and RPM2 supports double the
+	 * FIFO len to accommodate 8 LMACS
+	 */
+	if (fifo_size == 0x20000 || fifo_size == 0x40000)
		*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
	else
		*max_mtu = NIC_HW_MAX_FRS;
@@ -4109,7 +4113,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
 
		/* Get LMAC id's from bitmap */
		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
-		for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
+		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
			if (!lmac_fifo_len) {
				dev_err(rvu->dev,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 1e348fd0d930..16cfc802e348 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -617,6 +617,12 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
	if (blkaddr < 0)
		return;
 
+	/* Ucast rule should not be installed if DMAC
+	 * extraction is not supported by the profile.
+	 */
+	if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf))
+		return;
+
	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
					 nixlf, NIXLF_UCAST_ENTRY);
@@ -778,6 +784,14 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
	/* Get 'pcifunc' of PF device */
	pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
	pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+	/* Bcast rule should not be installed if both DMAC
+	 * and LXMB extraction is not supported by the profile.
+	 */
+	if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
+	    !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf))
+		return;
+
	index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
					 NIXLF_BCAST_ENTRY);
@@ -848,6 +862,14 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
	vf_func = pcifunc & RVU_PFVF_FUNC_MASK;
	pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
	pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+	/* Mcast rule should not be installed if both DMAC
+	 * and LXMB extraction is not supported by the profile.
+	 */
+	if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
+	    !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf))
+		return;
+
	index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
					 NIXLF_ALLMULTI_ENTRY);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 7c4e1acd0f77..006beb5cf98d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -26,8 +26,10 @@ static const char * const npc_flow_names[] = {
	[NPC_VLAN_ETYPE_STAG] = "vlan ether type stag",
	[NPC_OUTER_VID]	= "outer vlan id",
	[NPC_TOS]	= "tos",
+	[NPC_IPFRAG_IPV4] = "fragmented IPv4 header ",
	[NPC_SIP_IPV4]	= "ipv4 source ip",
	[NPC_DIP_IPV4]	= "ipv4 destination ip",
+	[NPC_IPFRAG_IPV6] = "fragmented IPv6 header ",
	[NPC_SIP_IPV6]	= "ipv6 source ip",
	[NPC_DIP_IPV6]	= "ipv6 destination ip",
	[NPC_IPPROTO_TCP] = "ip proto tcp",
@@ -43,9 +45,23 @@ static const char * const npc_flow_names[] = {
	[NPC_DPORT_UDP]	= "udp destination port",
	[NPC_SPORT_SCTP] = "sctp source port",
	[NPC_DPORT_SCTP] = "sctp destination port",
+	[NPC_LXMB]	= "Mcast/Bcast header ",
	[NPC_UNKNOWN]	= "unknown",
 };
 
+bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u64 mcam_features;
+	u64 unsupported;
+
+	mcam_features = is_npc_intf_tx(intf) ? mcam->tx_features : mcam->rx_features;
+	unsupported = (mcam_features ^ features) & ~mcam_features;
+
+	/* Return false if at least one of the input flows is not extracted */
+	return !unsupported;
+}
+
 const char *npc_get_field_name(u8 hdr)
 {
	if (hdr >= ARRAY_SIZE(npc_flow_names))
@@ -340,8 +356,10 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
	vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
 
	/* if key profile programmed does not extract Ethertype at all */
-	if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
+	if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) {
+		dev_err(rvu->dev, "mkex: Ethertype is not extracted.\n");
		goto vlan_tci;
+	}
 
	/* if key profile programmed extracts Ethertype from one layer */
	if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
@@ -354,35 +372,45 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
	/* if key profile programmed extracts Ethertype from multiple layers */
	if (etype_ether->nr_kws && etype_tag1->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
-			if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i])
+			if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i]) {
+				dev_err(rvu->dev, "mkex: Etype pos is different for untagged and tagged pkts.\n");
				goto vlan_tci;
+			}
		}
		key_fields[NPC_ETYPE] = *etype_tag1;
	}
	if (etype_ether->nr_kws && etype_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
-			if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i])
+			if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i]) {
+				dev_err(rvu->dev, "mkex: Etype pos is different for untagged and double tagged pkts.\n");
				goto vlan_tci;
+			}
		}
		key_fields[NPC_ETYPE] = *etype_tag2;
	}
	if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
-			if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i])
+			if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i]) {
+				dev_err(rvu->dev, "mkex: Etype pos is different for tagged and double tagged pkts.\n");
				goto vlan_tci;
+			}
		}
		key_fields[NPC_ETYPE] = *etype_tag2;
	}
 
	/* check none of higher layers overwrite Ethertype */
	start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
-	if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf))
+	if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf)) {
+		dev_err(rvu->dev, "mkex: Ethertype is overwritten by higher layers.\n");
		goto vlan_tci;
+	}
	*features |= BIT_ULL(NPC_ETYPE);
 vlan_tci:
	/* if key profile does not extract outer vlan tci at all */
-	if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
+	if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws) {
+		dev_err(rvu->dev, "mkex: Outer vlan tci is not extracted.\n");
		goto done;
+	}
 
	/* if key profile extracts outer vlan tci from one layer */
	if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
@@ -393,15 +421,19 @@ vlan_tci:
	/* if key profile extracts outer vlan tci from multiple layers */
	if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
-			if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i])
+			if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i]) {
+				dev_err(rvu->dev, "mkex: Out vlan tci pos is different for tagged and double tagged pkts.\n");
				goto done;
+			}
		}
		key_fields[NPC_OUTER_VID] = *vlan_tag2;
	}
	/* check none of higher layers overwrite outer vlan tci */
	start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
-	if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf))
+	if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf)) {
+		dev_err(rvu->dev, "mkex: Outer vlan tci is overwritten by higher layers.\n");
		goto done;
+	}
	*features |= BIT_ULL(NPC_OUTER_VID);
 done:
	return;
@@ -419,8 +451,6 @@ static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
	nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
	hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
	key = FIELD_GET(NPC_KEY_OFFSET, cfg);
-	start_kwi = key / 8;
-	offset = (key * 8) % 64;
 
	/* For Tx, Layer A has NIX_INST_HDR_S(64 bytes) preceding
	 * ethernet header.
@@ -435,13 +465,18 @@ static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
 
 #define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen)			       \
 do {									       \
+	start_kwi = key / 8;						       \
+	offset = (key * 8) % 64;					       \
	if (lid == (hlid) && lt == (hlt)) {				       \
		if ((hstart) >= hdr &&					       \
		    ((hstart) + (hlen)) <= (hdr + nr_bytes)) {		       \
			bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
			npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \
+			offset += bit_offset;				       \
+			start_kwi += offset / 64;			       \
+			offset %= 64;					       \
			npc_set_kw_masks(mcam, (name), (hlen) * 8,	       \
-					 start_kwi, offset + bit_offset, intf);\
+					 start_kwi, offset, intf);	       \
		}							       \
	}								       \
 } while (0)
@@ -451,8 +486,10 @@ do {									       \
	 * Example: Source IP is 4 bytes and starts at 12th byte of IP header
	 */
	NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1);
+	NPC_SCAN_HDR(NPC_IPFRAG_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 6, 1);
	NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
	NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
+	NPC_SCAN_HDR(NPC_IPFRAG_IPV6, NPC_LID_LC, NPC_LT_LC_IP6_EXT, 6, 1);
	NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
	NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
	NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
@@ -522,6 +559,10 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
	if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
		*features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) |
			     BIT_ULL(NPC_VLAN_ETYPE_STAG);
+
+	/* for L2M/L2B/L3M/L3B, check if the type is present in the key */
+	if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf))
+		*features |= BIT_ULL(NPC_LXMB);
 }
 
 /* Scan key extraction profile and record how fields of our interest
@@ -599,16 +640,6 @@ static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
		dev_err(rvu->dev, "Channel cannot be overwritten\n");
		return -EINVAL;
	}
-	/* DMAC should be present in key for unicast filter to work */
-	if (!npc_is_field_present(rvu, NPC_DMAC, NIX_INTF_RX)) {
-		dev_err(rvu->dev, "DMAC not present in Key\n");
-		return -EINVAL;
-	}
-	/* check that none of the fields overwrite DMAC */
-	if (npc_check_overlap(rvu, blkaddr, NPC_DMAC, 0, NIX_INTF_RX)) {
-		dev_err(rvu->dev, "DMAC cannot be overwritten\n");
-		return -EINVAL;
-	}
 
	npc_set_features(rvu, blkaddr, NIX_INTF_TX);
	npc_set_features(rvu, blkaddr, NIX_INTF_RX);
@@ -639,9 +670,9 @@ static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
	unsupported = (*mcam_features ^ features) & ~(*mcam_features);
	if (unsupported) {
-		dev_info(rvu->dev, "Unsupported flow(s):\n");
+		dev_warn(rvu->dev, "Unsupported flow(s):\n");
		for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
-			dev_info(rvu->dev, "%s ", npc_get_field_name(bit));
+			dev_warn(rvu->dev, "%s ", npc_get_field_name(bit));
		return -EOPNOTSUPP;
	}
 
@@ -851,6 +882,11 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
		npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
				 0, ~0ULL, 0, intf);
 
+	if (features & BIT_ULL(NPC_LXMB)) {
+		output->lxmb = is_broadcast_ether_addr(pkt->dmac) ? 2 : 1;
+		npc_update_entry(rvu, NPC_LXMB, entry, output->lxmb, 0,
+				 output->lxmb, 0, intf);
+	}
 #define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi)      \
 do {									      \
	if (features & BIT_ULL((field))) {				      \
@@ -867,6 +903,8 @@ do {									      \
	NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
		       ntohs(mask->etype), 0);
	NPC_WRITE_FLOW(NPC_TOS, tos, pkt->tos, 0, mask->tos, 0);
+	NPC_WRITE_FLOW(NPC_IPFRAG_IPV4, ip_flag, pkt->ip_flag, 0,
+		       mask->ip_flag, 0);
	NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
		       ntohl(mask->ip4src), 0);
	NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
@@ -887,6 +925,8 @@ do {									      \
	NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
		       ntohs(mask->vlan_tci), 0);
+	NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0,
+		       mask->next_header, 0);
 
	npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
	npc_update_vlan_features(rvu, entry, features, intf);
@@ -991,8 +1031,20 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
	action.match_id = req->match_id;
	action.flow_key_alg = req->flow_key_alg;
 
-	if (req->op == NIX_RX_ACTION_DEFAULT && pfvf->def_ucast_rule)
-		action = pfvf->def_ucast_rule->rx_action;
+	if (req->op == NIX_RX_ACTION_DEFAULT) {
+		if (pfvf->def_ucast_rule) {
+			action = pfvf->def_ucast_rule->rx_action;
+		} else {
+			/* For profiles which do not extract DMAC, the default
+			 * unicast entry is unused. Hence modify action for the
+			 * requests which use same action as default unicast
+			 * entry
+			 */
+			*(u64 *)&action = 0;
+			action.pf_func = target;
+			action.op = NIX_RX_ACTIONOP_UCAST;
+		}
+	}
 
	entry->action = *(u64 *)&action;
@@ -1153,6 +1205,7 @@ find_rule:
	rule->chan_mask = write_req.entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
	rule->chan = write_req.entry_data.kw[0] & NPC_KEX_CHAN_MASK;
	rule->chan &= rule->chan_mask;
+	rule->lxmb = dummy.lxmb;
	if (is_npc_intf_tx(req->intf))
		rule->intf = pfvf->nix_tx_intf;
	else
@@ -1215,6 +1268,35 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
	if (!is_npc_interface_valid(rvu, req->intf))
		return NPC_FLOW_INTF_INVALID;
 
+	/* If DMAC is not extracted in MKEX, rules installed by AF
+	 * can rely on L2MB bit set by hardware protocol checker for
+	 * broadcast and multicast addresses.
+	 */
+	if (npc_check_field(rvu, blkaddr, NPC_DMAC, req->intf))
+		goto process_flow;
+
+	if (is_pffunc_af(req->hdr.pcifunc) &&
+	    req->features & BIT_ULL(NPC_DMAC)) {
+		if (is_unicast_ether_addr(req->packet.dmac)) {
+			dev_warn(rvu->dev,
+				 "%s: mkex profile does not support ucast flow\n",
+				 __func__);
+			return NPC_FLOW_NOT_SUPPORTED;
+		}
+
+		if (!npc_is_field_present(rvu, NPC_LXMB, req->intf)) {
+			dev_warn(rvu->dev,
+				 "%s: mkex profile does not support bcast/mcast flow",
+				 __func__);
+			return NPC_FLOW_NOT_SUPPORTED;
+		}
+
+		/* Modify feature to use LXMB instead of DMAC */
+		req->features &= ~BIT_ULL(NPC_DMAC);
+		req->features |= BIT_ULL(NPC_LXMB);
+	}
+
+process_flow:
	if (from_vf && req->default_rule)
		return NPC_FLOW_VF_PERM_DENIED;
@@ -1558,3 +1640,22 @@ int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
 
	return 0;
 }
+
+int rvu_mbox_handler_npc_get_field_status(struct rvu *rvu,
+					  struct npc_get_field_status_req *req,
+					  struct npc_get_field_status_rsp *rsp)
+{
+	int blkaddr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	if (!is_npc_interface_valid(rvu, req->intf))
+		return NPC_FLOW_INTF_INVALID;
+
+	if (npc_check_field(rvu, blkaddr, req->field, req->intf))
+		rsp->enable = 1;
+
+	return 0;
+}
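The NPC_SCAN_HDR change above folds a field's bit_offset into the key-word index, so an extracted field whose bit position spills past 64 bits lands in the next key word instead of overflowing the in-word offset. A worked sketch of the normalization with hypothetical inputs:

#include <stdio.h>

int main(void)
{
	int key = 14;		/* byte offset of extraction in the MCAM key */
	int bit_offset = 40;	/* field position within the extracted bytes */

	int start_kwi = key / 8;	/* 64-bit key-word index */
	int offset = (key * 8) % 64;	/* bit offset inside that word */

	offset += bit_offset;
	start_kwi += offset / 64;	/* carry into the next key word */
	offset %= 64;

	printf("kwi=%d offset=%d\n", start_kwi, offset); /* kwi=2 offset=24 */
	return 0;
}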
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
index 594029007f85..f69102d20c90 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -490,7 +490,7 @@ static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id)
	if (idx == table->tot_ids) {
		mutex_unlock(&table->lock);
		dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n",
-			__func__, bitmap_weight(table->id_bmap, table->tot_ids));
+			__func__, table->tot_ids);
 
		return false;
	}
@@ -1870,12 +1870,11 @@ int rvu_npc_exact_init(struct rvu *rvu)
	/* Set capability to true */
	rvu->hw->cap.npc_exact_match_enabled = true;
 
-	table = kmalloc(sizeof(*table), GFP_KERNEL);
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
 
	dev_dbg(rvu->dev, "%s: Memory allocation for table success\n", __func__);
-	memset(table, 0, sizeof(*table));
	rvu->hw->table = table;
 
	/* Read table size, ways and depth */
@@ -1899,24 +1898,24 @@ int rvu_npc_exact_init(struct rvu *rvu)
	table_size = table->mem_table.depth * table->mem_table.ways;
 
	/* Allocate bitmap for 4way 2K table */
-	table->mem_table.bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table_size),
-					     sizeof(long), GFP_KERNEL);
+	table->mem_table.bmap = devm_bitmap_zalloc(rvu->dev, table_size,
+						   GFP_KERNEL);
	if (!table->mem_table.bmap)
		return -ENOMEM;
 
	dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__);
 
	/* Allocate bitmap for 32 entry mcam */
-	table->cam_table.bmap = devm_kcalloc(rvu->dev, 1, sizeof(long), GFP_KERNEL);
+	table->cam_table.bmap = devm_bitmap_zalloc(rvu->dev, 32, GFP_KERNEL);
	if (!table->cam_table.bmap)
		return -ENOMEM;
 
	dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__);
 
-	table->tot_ids = (table->mem_table.depth * table->mem_table.ways) + table->cam_table.depth;
-	table->id_bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table->tot_ids),
-				      table->tot_ids, GFP_KERNEL);
+	table->tot_ids = table_size + table->cam_table.depth;
+	table->id_bmap = devm_bitmap_zalloc(rvu->dev, table->tot_ids,
+					    GFP_KERNEL);
	if (!table->id_bmap)
		return -ENOMEM;
@@ -1957,7 +1956,9 @@ int rvu_npc_exact_init(struct rvu *rvu)
	/* Install SDP drop rule */
	drop_mcam_idx = &table->num_drop_rules;
 
-	max_lmac_cnt = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX + PF_CGXMAP_BASE;
+	max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx +
+		       PF_CGXMAP_BASE;
+
	for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
		if (rvu->pf2cgxlmac_map[i] == 0xFF)
			continue;
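rvu_npc_exact_init() above switches its bitmaps to devm_bitmap_zalloc(), which sizes the allocation from the bit count alone; the previous devm_kcalloc(BITS_TO_LONGS(n), n) call for id_bmap multiplied the word count by the bit count and over-allocated. A userspace model of the corrected sizing, assuming the 4-way, 2K-deep table mentioned in the driver's comments:

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* What bitmap_zalloc() boils down to: exactly enough zeroed longs
 * to hold nbits.
 */
static unsigned long *bitmap_zalloc_model(unsigned int nbits)
{
	return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
}

int main(void)
{
	/* hypothetical: 2K-deep, 4-way mem table plus 32-entry cam table */
	unsigned int tot_ids = 2048 * 4 + 32;
	unsigned long *id_bmap = bitmap_zalloc_model(tot_ids);

	if (!id_bmap)
		return 1;
	printf("%u ids -> %zu bytes\n", tot_ids,
	       BITS_TO_LONGS(tot_ids) * sizeof(unsigned long));
	free(id_bmap);
	return 0;
}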