Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x')
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 45
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 162
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 48
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 8
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 11
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 1
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 31
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 385
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 155
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 21
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 1876
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 368
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 619
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 22
14 files changed, 1491 insertions, 2261 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 391f29ef6d2e..4d8f8aba0ea5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -26,8 +26,8 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.78.17-0" -#define DRV_MODULE_RELDATE "2013/04/11" +#define DRV_MODULE_VERSION "1.78.19-0" +#define DRV_MODULE_RELDATE "2014/02/10" #define BNX2X_BC_VER 0x040200 #if defined(CONFIG_DCB) @@ -75,13 +75,22 @@ enum bnx2x_int_mode { #define BNX2X_MSG_DCB 0x8000000 /* regular debug print */ +#define DP_INNER(fmt, ...) \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__VA_ARGS__); + #define DP(__mask, fmt, ...) \ do { \ if (unlikely(bp->msg_enable & (__mask))) \ - pr_notice("[%s:%d(%s)]" fmt, \ - __func__, __LINE__, \ - bp->dev ? (bp->dev->name) : "?", \ - ##__VA_ARGS__); \ + DP_INNER(fmt, ##__VA_ARGS__); \ +} while (0) + +#define DP_AND(__mask, fmt, ...) \ +do { \ + if (unlikely((bp->msg_enable & (__mask)) == __mask)) \ + DP_INNER(fmt, ##__VA_ARGS__); \ } while (0) #define DP_CONT(__mask, fmt, ...) \ @@ -1146,10 +1155,6 @@ struct bnx2x_port { (offsetof(struct bnx2x_eth_stats, stat_name) / 4) /* slow path */ - -/* slow path work-queue */ -extern struct workqueue_struct *bnx2x_wq; - #define BNX2X_MAX_NUM_OF_VFS 64 #define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */ #define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND) @@ -1261,6 +1266,7 @@ struct bnx2x_slowpath { union { struct client_init_ramrod_data init_data; struct client_update_ramrod_data update_data; + struct tpa_update_ramrod_data tpa_data; } q_rdata; union { @@ -1392,7 +1398,7 @@ struct bnx2x_fw_stats_data { }; /* Public slow path states */ -enum { +enum sp_rtnl_flag { BNX2X_SP_RTNL_SETUP_TC, BNX2X_SP_RTNL_TX_TIMEOUT, BNX2X_SP_RTNL_FAN_FAILURE, @@ -1403,6 +1409,12 @@ enum { BNX2X_SP_RTNL_RX_MODE, BNX2X_SP_RTNL_HYPERVISOR_VLAN, BNX2X_SP_RTNL_TX_STOP, + BNX2X_SP_RTNL_GET_DRV_VERSION, +}; + +enum bnx2x_iov_flag { + BNX2X_IOV_HANDLE_VF_MSG, + BNX2X_IOV_HANDLE_FLR, }; struct bnx2x_prev_path_list { @@ -1603,6 +1615,8 @@ struct bnx2x { int mrrs; struct delayed_work sp_task; + struct delayed_work iov_task; + atomic_t interrupt_occurred; struct delayed_work sp_rtnl_task; @@ -1693,6 +1707,10 @@ struct bnx2x { struct bnx2x_slowpath *slowpath; dma_addr_t slowpath_mapping; + /* Mechanism protecting the drv_info_to_mcp */ + struct mutex drv_info_mutex; + bool drv_info_mng_owner; + /* Total number of FW statistics requests */ u8 fw_stats_num; @@ -1882,6 +1900,9 @@ struct bnx2x { /* operation indication for the sp_rtnl task */ unsigned long sp_rtnl_state; + /* Indication of the IOV tasks */ + unsigned long iov_task_state; + /* DCBX Negotiation results */ struct dcbx_features dcbx_local_feat; u32 dcbx_error; @@ -2525,6 +2546,8 @@ enum { void bnx2x_set_local_cmng(struct bnx2x *bp); +void bnx2x_update_mng_version(struct bnx2x *bp); + #define MCPR_SCRATCH_BASE(bp) \ (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index dbcff509dc3f..9261d5313b5b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -61,10 +61,14 @@ static void bnx2x_add_all_napi(struct bnx2x *bp) static int bnx2x_calc_num_queues(struct bnx2x *bp) { - return bnx2x_num_queues ? 
- min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) : - min_t(int, netif_get_num_default_rss_queues(), - BNX2X_MAX_QUEUES(bp)); + int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues(); + + /* Reduce memory usage in kdump environment by using only one queue */ + if (reset_devices) + nq = 1; + + nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp)); + return nq; } /** @@ -868,6 +872,8 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) if (unlikely(bp->panic)) return 0; #endif + if (budget <= 0) + return rx_pkt; bd_cons = fp->rx_bd_cons; bd_prod = fp->rx_bd_prod; @@ -1638,36 +1644,16 @@ int bnx2x_enable_msix(struct bnx2x *bp) DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n", msix_vec); - rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec); - + rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], + BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec); /* * reconfigure number of tx/rx queues according to available * MSI-X vectors */ - if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) { - /* how less vectors we will have? */ - int diff = msix_vec - rc; - - BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc); - - rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); - - if (rc) { - BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); - goto no_msix; - } - /* - * decrease number of queues by number of unallocated entries - */ - bp->num_ethernet_queues -= diff; - bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; - - BNX2X_DEV_INFO("New queue configuration set: %d\n", - bp->num_queues); - } else if (rc > 0) { + if (rc == -ENOSPC) { /* Get by with single vector */ - rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1); - if (rc) { + rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1); + if (rc < 0) { BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n", rc); goto no_msix; @@ -1680,8 +1666,22 @@ int bnx2x_enable_msix(struct bnx2x *bp) bp->num_ethernet_queues = 1; bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; } else if (rc < 0) { - BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); + BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); goto no_msix; + } else if (rc < msix_vec) { + /* how many fewer vectors will we have? 
*/ + int diff = msix_vec - rc; + + BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc); + + /* + * decrease number of queues by number of unallocated entries + */ + bp->num_ethernet_queues -= diff; + bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; + + BNX2X_DEV_INFO("New queue configuration set: %d\n", + bp->num_queues); } bp->flags |= USING_MSIX_FLAG; @@ -2234,8 +2234,10 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) sizeof(struct per_queue_stats) * num_queue_stats + sizeof(struct stats_counter); - BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping, - bp->fw_stats_data_sz + bp->fw_stats_req_sz); + bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + if (!bp->fw_stats) + goto alloc_mem_err; /* Set shortcuts */ bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; @@ -2802,6 +2804,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) if (CNIC_ENABLED(bp)) bnx2x_load_cnic(bp); + if (IS_PF(bp)) + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); + if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { /* mark driver is loaded in shmem2 */ u32 val; @@ -3028,6 +3033,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bp->state = BNX2X_STATE_CLOSED; bp->cnic_loaded = false; + /* Clear driver version indication in shmem */ + if (IS_PF(bp)) + bnx2x_update_mng_version(bp); + /* Check if there are pending parity attentions. If there are - set * RECOVERY_IN_PROGRESS. */ @@ -4370,14 +4379,17 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) if (!IS_FCOE_IDX(index)) { /* status blocks */ - if (!CHIP_IS_E1x(bp)) - BNX2X_PCI_ALLOC(sb->e2_sb, - &bnx2x_fp(bp, index, status_blk_mapping), - sizeof(struct host_hc_status_block_e2)); - else - BNX2X_PCI_ALLOC(sb->e1x_sb, - &bnx2x_fp(bp, index, status_blk_mapping), - sizeof(struct host_hc_status_block_e1x)); + if (!CHIP_IS_E1x(bp)) { + sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), + sizeof(struct host_hc_status_block_e2)); + if (!sb->e2_sb) + goto alloc_mem_err; + } else { + sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), + sizeof(struct host_hc_status_block_e1x)); + if (!sb->e1x_sb) + goto alloc_mem_err; + } } /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to @@ -4396,35 +4408,49 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) "allocating tx memory of fp %d cos %d\n", index, cos); - BNX2X_ALLOC(txdata->tx_buf_ring, - sizeof(struct sw_tx_bd) * NUM_TX_BD); - BNX2X_PCI_ALLOC(txdata->tx_desc_ring, - &txdata->tx_desc_mapping, - sizeof(union eth_tx_bd_types) * NUM_TX_BD); + txdata->tx_buf_ring = kcalloc(NUM_TX_BD, + sizeof(struct sw_tx_bd), + GFP_KERNEL); + if (!txdata->tx_buf_ring) + goto alloc_mem_err; + txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping, + sizeof(union eth_tx_bd_types) * NUM_TX_BD); + if (!txdata->tx_desc_ring) + goto alloc_mem_err; } } /* Rx */ if (!skip_rx_queue(bp, index)) { /* fastpath rx rings: rx_buf rx_desc rx_comp */ - BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring), - sizeof(struct sw_rx_bd) * NUM_RX_BD); - BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring), - &bnx2x_fp(bp, index, rx_desc_mapping), - sizeof(struct eth_rx_bd) * NUM_RX_BD); + bnx2x_fp(bp, index, rx_buf_ring) = + kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL); + if (!bnx2x_fp(bp, index, rx_buf_ring)) + goto alloc_mem_err; + bnx2x_fp(bp, index, rx_desc_ring) = + BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping), + 
sizeof(struct eth_rx_bd) * NUM_RX_BD); + if (!bnx2x_fp(bp, index, rx_desc_ring)) + goto alloc_mem_err; /* Seed all CQEs by 1s */ - BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring), - &bnx2x_fp(bp, index, rx_comp_mapping), - sizeof(struct eth_fast_path_rx_cqe) * - NUM_RCQ_BD); + bnx2x_fp(bp, index, rx_comp_ring) = + BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping), + sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD); + if (!bnx2x_fp(bp, index, rx_comp_ring)) + goto alloc_mem_err; /* SGE ring */ - BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring), - sizeof(struct sw_rx_page) * NUM_RX_SGE); - BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring), - &bnx2x_fp(bp, index, rx_sge_mapping), - BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); + bnx2x_fp(bp, index, rx_page_ring) = + kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page), + GFP_KERNEL); + if (!bnx2x_fp(bp, index, rx_page_ring)) + goto alloc_mem_err; + bnx2x_fp(bp, index, rx_sge_ring) = + BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping), + BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); + if (!bnx2x_fp(bp, index, rx_sge_ring)) + goto alloc_mem_err; /* RX BD ring */ bnx2x_set_next_page_rx_bd(fp); @@ -4780,12 +4806,8 @@ void bnx2x_tx_timeout(struct net_device *dev) bnx2x_panic(); #endif - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - /* This allows the netif to be shutdown gracefully before resetting */ - schedule_delayed_work(&bp->sp_rtnl_task, 0); + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0); } int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) @@ -4913,3 +4935,15 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, disable = disable ? 1 : (usec ? 0 : 1); storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); } + +void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, + u32 verbose) +{ + smp_mb__before_clear_bit(); + set_bit(flag, &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n", + flag); + schedule_delayed_work(&bp->sp_rtnl_task, 0); +} +EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index a89a40f88c25..05f4f5f52635 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -47,31 +47,26 @@ extern int bnx2x_num_queues; } \ } while (0) -#define BNX2X_PCI_ALLOC(x, y, size) \ - do { \ - x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ - if (x == NULL) \ - goto alloc_mem_err; \ - DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ - (unsigned long long)(*y), x); \ - } while (0) - -#define BNX2X_PCI_FALLOC(x, y, size) \ - do { \ - x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ - if (x == NULL) \ - goto alloc_mem_err; \ - memset((void *)x, 0xFFFFFFFF, size); \ - DP(NETIF_MSG_HW, "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",\ - (unsigned long long)(*y), x); \ - } while (0) - -#define BNX2X_ALLOC(x, size) \ - do { \ - x = kzalloc(size, GFP_KERNEL); \ - if (x == NULL) \ - goto alloc_mem_err; \ - } while (0) +#define BNX2X_PCI_ALLOC(y, size) \ +({ \ + void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x) \ + DP(NETIF_MSG_HW, \ + "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ + (unsigned long long)(*y), x); \ + x; \ +}) +#define BNX2X_PCI_FALLOC(y, size) \ +({ \ + void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x) { \ + 
memset(x, 0xff, size); \ + DP(NETIF_MSG_HW, \ + "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n", \ + (unsigned long long)(*y), x); \ + } \ + x; \ +}) /*********************** Interfaces **************************** * Functions that need to be implemented by each driver version @@ -1324,4 +1319,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len); int bnx2x_drain_tx_queues(struct bnx2x *bp); void bnx2x_squeeze_objects(struct bnx2x *bp); +void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag, + u32 verbose); + #endif /* BNX2X_CMN_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index fdace204b054..97ea5421dd96 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -710,8 +710,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp) * as we are handling an attention on a work queue which must be * flushed at some rtnl-locked contexts (e.g. if down) */ - if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) - schedule_delayed_work(&bp->sp_rtnl_task, 0); + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0); } void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) @@ -764,10 +763,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) if (IS_MF(bp)) bnx2x_link_sync_notify(bp); - set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state); - - schedule_delayed_work(&bp->sp_rtnl_task, 0); - + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0); return; } case BNX2X_DCBX_STATE_TX_PAUSED: diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 38fc794c1655..b6de05e3149b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -2969,8 +2969,9 @@ static void bnx2x_self_test(struct net_device *dev, #define IS_PORT_STAT(i) \ ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) -#define IS_MF_MODE_STAT(bp) \ - (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) +#define HIDE_PORT_STAT(bp) \ + ((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \ + IS_VF(bp)) /* ethtool statistics are displayed for all regular ethernet queues and the * fcoe L2 queue if not disabled @@ -2992,7 +2993,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset) BNX2X_NUM_Q_STATS; } else num_strings = 0; - if (IS_MF_MODE_STAT(bp)) { + if (HIDE_PORT_STAT(bp)) { for (i = 0; i < BNX2X_NUM_STATS; i++) if (IS_FUNC_STAT(i)) num_strings++; @@ -3047,7 +3048,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) } for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { - if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) + if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i)) continue; strcpy(buf + (k + j)*ETH_GSTRING_LEN, bnx2x_stats_arr[i].string); @@ -3105,7 +3106,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, hw_stats = (u32 *)&bp->eth_stats; for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { - if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) + if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i)) continue; if (bnx2x_stats_arr[i].size == 0) { /* skip this counter */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index 84aecdf06f7a..95dc36543548 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ 
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -87,7 +87,6 @@ (IRO[156].base + ((vfId) * IRO[156].m1)) #define CSTORM_VF_TO_PF_OFFSET(funcId) \ (IRO[150].base + ((funcId) * IRO[150].m1)) -#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base) #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ (IRO[203].base + ((pfId) * IRO[203].m1)) #define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index cf1df8b62e2c..5ba8af50c84f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -2003,6 +2003,23 @@ struct shmem_lfa { #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24) }; +/* Used to support NCSI get OS driver version. + * On driver load the version value will be set; + * on driver unload a value of 0x0 will be set. + */ +struct os_drv_ver { +#define DRV_VER_NOT_LOADED 0 + + /* personalities order is important */ +#define DRV_PERS_ETHERNET 0 +#define DRV_PERS_ISCSI 1 +#define DRV_PERS_FCOE 2 + + /* shmem2 struct is constant; can't add more personalities here */ +#define MAX_DRV_PERS 3 + u32 versions[MAX_DRV_PERS]; +}; + struct ncsi_oem_fcoe_features { u32 fcoe_features1; #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF @@ -2217,6 +2234,18 @@ struct shmem2_region { u32 reserved4; /* Offset 0x150 */ u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */ #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0) + + u32 reserved5[2]; + u32 reserved6[PORT_MAX]; + + /* driver version for each personality */ + struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */ + + /* Flag to the driver that PF's drv_info_host_addr buffer was read */ + u32 mfw_drv_indication; + + /* We use indication for each PF (0..3) */ +#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_)) }; @@ -2848,7 +2877,7 @@ struct afex_stats { #define BCM_5710_FW_MAJOR_VERSION 7 #define BCM_5710_FW_MINOR_VERSION 8 -#define BCM_5710_FW_REVISION_VERSION 17 +#define BCM_5710_FW_REVISION_VERSION 19 #define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7d4382286457..a78edaccceee 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -120,7 +120,8 @@ static int debug; module_param(debug, int, S_IRUGO); MODULE_PARM_DESC(debug, " Default debug msglevel"); -struct workqueue_struct *bnx2x_wq; +static struct workqueue_struct *bnx2x_wq; +struct workqueue_struct *bnx2x_iov_wq; struct bnx2x_mac_vals { u32 xmac_addr; @@ -918,7 +919,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) u16 start = 0, end = 0; u8 cos; #endif - if (disable_int) + if (IS_PF(bp) && disable_int) bnx2x_int_disable(bp); bp->stats_state = STATS_STATE_DISABLED; @@ -929,33 +930,41 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) /* Indices */ /* Common */ - BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", - bp->def_idx, bp->def_att_idx, bp->attn_state, - bp->spq_prod_idx, bp->stats_counter); - BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", - bp->def_status_blk->atten_status_block.attn_bits, - bp->def_status_blk->atten_status_block.attn_bits_ack, - bp->def_status_blk->atten_status_block.status_block_id, - bp->def_status_blk->atten_status_block.attn_bits_index); - BNX2X_ERR(" def ("); - for (i 
= 0; i < HC_SP_SB_MAX_INDICES; i++) - pr_cont("0x%x%s", - bp->def_status_blk->sp_sb.index_values[i], - (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " "); - - for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) - *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + - i*sizeof(u32)); - - pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n", - sp_sb_data.igu_sb_id, - sp_sb_data.igu_seg_id, - sp_sb_data.p_func.pf_id, - sp_sb_data.p_func.vnic_id, - sp_sb_data.p_func.vf_id, - sp_sb_data.p_func.vf_valid, - sp_sb_data.state); + if (IS_PF(bp)) { + struct host_sp_status_block *def_sb = bp->def_status_blk; + int data_size, cstorm_offset; + + BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", + bp->def_idx, bp->def_att_idx, bp->attn_state, + bp->spq_prod_idx, bp->stats_counter); + BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", + def_sb->atten_status_block.attn_bits, + def_sb->atten_status_block.attn_bits_ack, + def_sb->atten_status_block.status_block_id, + def_sb->atten_status_block.attn_bits_index); + BNX2X_ERR(" def ("); + for (i = 0; i < HC_SP_SB_MAX_INDICES; i++) + pr_cont("0x%x%s", + def_sb->sp_sb.index_values[i], + (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " "); + + data_size = sizeof(struct hc_sp_status_block_data) / + sizeof(u32); + cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func); + for (i = 0; i < data_size; i++) + *((u32 *)&sp_sb_data + i) = + REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset + + i * sizeof(u32)); + + pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n", + sp_sb_data.igu_sb_id, + sp_sb_data.igu_seg_id, + sp_sb_data.p_func.pf_id, + sp_sb_data.p_func.vnic_id, + sp_sb_data.p_func.vf_id, + sp_sb_data.p_func.vf_valid, + sp_sb_data.state); + } for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; @@ -1013,6 +1022,11 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) pr_cont("0x%x%s", fp->sb_index_values[j], (j == loop - 1) ? ")" : " "); + + /* VF cannot access FW reflection of the status block */ + if (IS_VF(bp)) + continue; + /* fw sb data */ data_size = CHIP_IS_E1x(bp) ? 
sizeof(struct hc_status_block_data_e1x) : @@ -1064,16 +1078,18 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) } #ifdef BNX2X_STOP_ON_ERROR - - /* event queue */ - BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); - for (i = 0; i < NUM_EQ_DESC; i++) { - u32 *data = (u32 *)&bp->eq_ring[i].message.data; - - BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n", - i, bp->eq_ring[i].message.opcode, - bp->eq_ring[i].message.error); - BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]); + if (IS_PF(bp)) { + /* event queue */ + BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); + for (i = 0; i < NUM_EQ_DESC; i++) { + u32 *data = (u32 *)&bp->eq_ring[i].message.data; + + BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n", + i, bp->eq_ring[i].message.opcode, + bp->eq_ring[i].message.error); + BNX2X_ERR("data: %x %x %x\n", + data[0], data[1], data[2]); + } } /* Rings */ @@ -1140,8 +1156,10 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) } } #endif - bnx2x_fw_dump(bp); - bnx2x_mc_assert(bp); + if (IS_PF(bp)) { + bnx2x_fw_dump(bp); + bnx2x_mc_assert(bp); + } BNX2X_ERR("end crash dump -----------------\n"); } @@ -1814,6 +1832,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) drv_cmd = BNX2X_Q_CMD_EMPTY; break; + case (RAMROD_CMD_ID_ETH_TPA_UPDATE): + DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid); + drv_cmd = BNX2X_Q_CMD_UPDATE_TPA; + break; + default: BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n", command, fp->index); @@ -1834,8 +1857,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) #else return; #endif - /* SRIOV: reschedule any 'in_progress' operations */ - bnx2x_iov_sp_event(bp, cid, true); smp_mb__before_atomic_inc(); atomic_inc(&bp->cq_spq_left); @@ -3460,10 +3481,15 @@ static void bnx2x_handle_eee_event(struct bnx2x *bp) bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); } +#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20) +#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25) + static void bnx2x_handle_drv_info_req(struct bnx2x *bp) { enum drv_info_opcode op_code; u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); + bool release = false; + int wait; /* if drv_info version supported by MFW doesn't match - send NACK */ if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { @@ -3474,6 +3500,9 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp) op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> DRV_INFO_CONTROL_OP_CODE_SHIFT; + /* Must prevent other flows from accessing drv_info_to_mcp */ + mutex_lock(&bp->drv_info_mutex); + memset(&bp->slowpath->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); @@ -3490,7 +3519,7 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp) default: /* if op code isn't supported - send NACK */ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); - return; + goto out; } /* if we got drv_info attn from MFW then these fields are defined in @@ -3502,6 +3531,106 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp) U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); + + /* Since management may want both this and get_driver_version, we + * need to wait until management notifies us it finished utilizing + * the buffer. 
+ */ + if (!SHMEM2_HAS(bp, mfw_drv_indication)) { + DP(BNX2X_MSG_MCP, "Management does not support indication\n"); + } else if (!bp->drv_info_mng_owner) { + u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1)); + + for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) { + u32 indication = SHMEM2_RD(bp, mfw_drv_indication); + + /* Management is done; need to clear indication */ + if (indication & bit) { + SHMEM2_WR(bp, mfw_drv_indication, + indication & ~bit); + release = true; + break; + } + + msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH); + } + } + if (!release) { + DP(BNX2X_MSG_MCP, "Management did not release indication\n"); + bp->drv_info_mng_owner = true; + } + +out: + mutex_unlock(&bp->drv_info_mutex); +} + +static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format) +{ + u8 vals[4]; + int i = 0; + + if (bnx2x_format) { + i = sscanf(version, "1.%c%hhd.%hhd.%hhd", + &vals[0], &vals[1], &vals[2], &vals[3]); + if (i > 0) + vals[0] -= '0'; + } else { + i = sscanf(version, "%hhd.%hhd.%hhd.%hhd", + &vals[0], &vals[1], &vals[2], &vals[3]); + } + + while (i < 4) + vals[i++] = 0; + + return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3]; +} + +void bnx2x_update_mng_version(struct bnx2x *bp) +{ + u32 iscsiver = DRV_VER_NOT_LOADED; + u32 fcoever = DRV_VER_NOT_LOADED; + u32 ethver = DRV_VER_NOT_LOADED; + int idx = BP_FW_MB_IDX(bp); + u8 *version; + + if (!SHMEM2_HAS(bp, func_os_drv_ver)) + return; + + mutex_lock(&bp->drv_info_mutex); + /* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */ + if (bp->drv_info_mng_owner) + goto out; + + if (bp->state != BNX2X_STATE_OPEN) + goto out; + + /* Parse ethernet driver version */ + ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); + if (!CNIC_LOADED(bp)) + goto out; + + /* Try getting storage driver version via cnic */ + memset(&bp->slowpath->drv_info_to_mcp, 0, + sizeof(union drv_info_to_mcp)); + bnx2x_drv_info_iscsi_stat(bp); + version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version; + iscsiver = bnx2x_update_mng_version_utility(version, false); + + memset(&bp->slowpath->drv_info_to_mcp, 0, + sizeof(union drv_info_to_mcp)); + bnx2x_drv_info_fcoe_stat(bp); + version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version; + fcoever = bnx2x_update_mng_version_utility(version, false); + +out: + SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver); + SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver); + SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever); + + mutex_unlock(&bp->drv_info_mutex); + + DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n", + ethver, iscsiver, fcoever); } static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) @@ -3644,10 +3773,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(bp, cid)); - type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; - - type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & - SPE_HDR_FUNCTION_ID); + /* In some cases, type may already contain the func-id + * mainly in SRIOV related use cases, so we add it here only + * if it's not already set. 
+ */ + if (!(cmd_type & SPE_HDR_FUNCTION_ID)) { + type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & + SPE_HDR_CONN_TYPE; + type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & + SPE_HDR_FUNCTION_ID); + } else { + type = cmd_type; + } spe->hdr.type = cpu_to_le16(type); @@ -3878,10 +4015,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp) * This is due to some boards consuming sufficient power when driver is * up to overheat if fan fails. */ - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0); } static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) @@ -4025,7 +4159,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) bnx2x_handle_drv_info_req(bp); if (val & DRV_STATUS_VF_DISABLED) - bnx2x_vf_handle_flr_event(bp); + bnx2x_schedule_iov_task(bp, + BNX2X_IOV_HANDLE_FLR); if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) bnx2x_pmf_update(bp); @@ -5216,14 +5351,14 @@ static void bnx2x_eq_int(struct bnx2x *bp) /* handle eq element */ switch (opcode) { case EVENT_RING_OPCODE_VF_PF_CHANNEL: - DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n"); - bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event); + bnx2x_vf_mbx_schedule(bp, + &elem->message.data.vf_pf_event); continue; case EVENT_RING_OPCODE_STAT_QUERY: - DP(BNX2X_MSG_SP | BNX2X_MSG_STATS, - "got statistics comp event %d\n", - bp->stats_comp++); + DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS), + "got statistics comp event %d\n", + bp->stats_comp++); /* nothing to do with stats comp */ goto next_spqe; @@ -5273,6 +5408,8 @@ static void bnx2x_eq_int(struct bnx2x *bp) break; } else { + int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE; + DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, "AFEX: ramrod completed FUNCTION_UPDATE\n"); f_obj->complete_cmd(bp, f_obj, @@ -5282,12 +5419,7 @@ static void bnx2x_eq_int(struct bnx2x *bp) * sp_rtnl task as all Queue SP operations * should run under rtnl_lock. */ - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, - &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - - schedule_delayed_work(&bp->sp_rtnl_task, 0); + bnx2x_schedule_sp_rtnl(bp, cmd, 0); } goto next_spqe; @@ -5435,13 +5567,6 @@ static void bnx2x_sp_task(struct work_struct *work) le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); } - /* must be called after the EQ processing (since eq leads to sriov - * ramrod completion flows). - * This flow may have been scheduled by the arrival of a ramrod - * completion, or by the sriov code rescheduling itself. 
- */ - bnx2x_iov_sp_task(bp); - /* afex - poll to check if VIFSET_ACK should be sent to MFW */ if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state)) { @@ -6005,18 +6130,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp) { int i; - if (IS_MF_SI(bp)) - /* - * In switch independent mode, the TSTORM needs to accept - * packets that failed classification, since approximate match - * mac addresses aren't written to NIG LLH - */ - REG_WR8(bp, BAR_TSTRORM_INTMEM + - TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2); - else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */ - REG_WR8(bp, BAR_TSTRORM_INTMEM + - TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0); - /* Zero this manually as its initialization is currently missing in the initTool */ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) @@ -7989,19 +8102,25 @@ void bnx2x_free_mem(struct bnx2x *bp) int bnx2x_alloc_mem_cnic(struct bnx2x *bp) { - if (!CHIP_IS_E1x(bp)) + if (!CHIP_IS_E1x(bp)) { /* size = the status block + ramrod buffers */ - BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, - sizeof(struct host_hc_status_block_e2)); - else - BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, - &bp->cnic_sb_mapping, - sizeof(struct - host_hc_status_block_e1x)); + bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e2)); + if (!bp->cnic_sb.e2_sb) + goto alloc_mem_err; + } else { + bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e1x)); + if (!bp->cnic_sb.e1x_sb) + goto alloc_mem_err; + } - if (CONFIGURE_NIC_MODE(bp) && !bp->t2) + if (CONFIGURE_NIC_MODE(bp) && !bp->t2) { /* allocate searcher T2 table, as it wasn't allocated before */ - BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); + bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); + if (!bp->t2) + goto alloc_mem_err; + } /* write address to which L5 should insert its values */ bp->cnic_eth_dev.addr_drv_info_to_mcp = @@ -8022,15 +8141,22 @@ int bnx2x_alloc_mem(struct bnx2x *bp) { int i, allocated, context_size; - if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) + if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) { /* allocate searcher T2 table */ - BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); + bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); + if (!bp->t2) + goto alloc_mem_err; + } - BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, - sizeof(struct host_sp_status_block)); + bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping, + sizeof(struct host_sp_status_block)); + if (!bp->def_status_blk) + goto alloc_mem_err; - BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, - sizeof(struct bnx2x_slowpath)); + bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping, + sizeof(struct bnx2x_slowpath)); + if (!bp->slowpath) + goto alloc_mem_err; /* Allocate memory for CDU context: * This memory is allocated separately and not in the generic ILT @@ -8050,12 +8176,16 @@ int bnx2x_alloc_mem(struct bnx2x *bp) for (i = 0, allocated = 0; allocated < context_size; i++) { bp->context[i].size = min(CDU_ILT_PAGE_SZ, (context_size - allocated)); - BNX2X_PCI_ALLOC(bp->context[i].vcxt, - &bp->context[i].cxt_mapping, - bp->context[i].size); + bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping, + bp->context[i].size); + if (!bp->context[i].vcxt) + goto alloc_mem_err; allocated += bp->context[i].size; } - BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES); + bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line), + GFP_KERNEL); + if (!bp->ilt->lines) + goto 
alloc_mem_err; if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) goto alloc_mem_err; @@ -8064,11 +8194,15 @@ int bnx2x_alloc_mem(struct bnx2x *bp) goto alloc_mem_err; /* Slow path ring */ - BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); + bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE); + if (!bp->spq) + goto alloc_mem_err; /* EQ */ - BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, - BCM_PAGE_SIZE * NUM_EQ_PAGES); + bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping, + BCM_PAGE_SIZE * NUM_EQ_PAGES); + if (!bp->eq_ring) + goto alloc_mem_err; return 0; @@ -8849,6 +8983,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) synchronize_irq(bp->pdev->irq); flush_workqueue(bnx2x_wq); + flush_workqueue(bnx2x_iov_wq); while (bnx2x_func_get_state(bp, &bp->func_obj) != BNX2X_F_STATE_STARTED && tout--) @@ -9774,6 +9909,10 @@ sp_rtnl_not_reset: bnx2x_dcbx_resume_hw_tx(bp); } + if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION, + &bp->sp_rtnl_state)) + bnx2x_update_mng_version(bp); + /* work which needs rtnl lock not-taken (as it takes the lock itself and * can be called from other contexts as well) */ @@ -11724,12 +11863,15 @@ static int bnx2x_init_bp(struct bnx2x *bp) mutex_init(&bp->port.phy_mutex); mutex_init(&bp->fw_mb_mutex); + mutex_init(&bp->drv_info_mutex); + bp->drv_info_mng_owner = false; spin_lock_init(&bp->stats_lock); sema_init(&bp->stats_sema, 1); INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); + INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task); if (IS_PF(bp)) { rc = bnx2x_get_hwinfo(bp); if (rc) @@ -11771,6 +11913,8 @@ static int bnx2x_init_bp(struct bnx2x *bp) bp->disable_tpa = disable_tpa; bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp); + /* Reduce memory usage in kdump environment by disabling TPA */ + bp->disable_tpa |= reset_devices; /* Set TPA flags */ if (bp->disable_tpa) { @@ -11942,7 +12086,7 @@ static int bnx2x_init_mcast_macs_list(struct bnx2x *bp, { int mc_count = netdev_mc_count(bp->dev); struct bnx2x_mcast_list_elem *mc_mac = - kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC); + kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC); struct netdev_hw_addr *ha; if (!mc_mac) @@ -12064,11 +12208,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev) return; } else { /* Schedule an SP task to handle rest of change */ - DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n"); - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE, + NETIF_MSG_IFUP); } } @@ -12101,11 +12242,8 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp) /* configuring mcast to a vf involves sleeping (when we * wait for the pf's response). 
*/ - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_VFPF_MCAST, - &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); + bnx2x_schedule_sp_rtnl(bp, + BNX2X_SP_RTNL_VFPF_MCAST, 0); } } @@ -13356,11 +13494,18 @@ static int __init bnx2x_init(void) pr_err("Cannot create workqueue\n"); return -ENOMEM; } + bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov"); + if (!bnx2x_iov_wq) { + pr_err("Cannot create iov workqueue\n"); + destroy_workqueue(bnx2x_wq); + return -ENOMEM; + } ret = pci_register_driver(&bnx2x_pci_driver); if (ret) { pr_err("Cannot register driver\n"); destroy_workqueue(bnx2x_wq); + destroy_workqueue(bnx2x_iov_wq); } return ret; } @@ -13372,6 +13517,7 @@ static void __exit bnx2x_cleanup(void) pci_unregister_driver(&bnx2x_pci_driver); destroy_workqueue(bnx2x_wq); + destroy_workqueue(bnx2x_iov_wq); /* Free globally allocated resources */ list_for_each_safe(pos, q, &bnx2x_prev_list) { @@ -13765,6 +13911,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) REG_WR(bp, scratch_offset + i, *(host_addr + i/4)); } + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); break; } @@ -13782,6 +13929,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE; SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); } + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); break; } @@ -13887,6 +14035,9 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, rcu_assign_pointer(bp->cnic_ops, ops); + /* Schedule driver to read CNIC driver versions */ + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); + return 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 0fb6ff2ac8e3..31297266b743 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -2277,11 +2277,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags); - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -2982,11 +2982,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp, raw->clear_pending(raw); return 0; } else { - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). 
*/ /* Send a ramrod */ @@ -3466,11 +3466,11 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp, raw->clear_pending(raw); return 0; } else { - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -4091,11 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp, data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; } - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -4158,16 +4158,6 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp, rss_obj->config_rss = bnx2x_setup_rss; } -int validate_vlan_mac(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *vlan_mac) -{ - if (!vlan_mac->get_n_elements) { - BNX2X_ERR("vlan mac object was not intialized\n"); - return -EINVAL; - } - return 0; -} - /********************** Queue state object ***********************************/ /** @@ -4587,13 +4577,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, /* Fill the ramrod data */ bnx2x_q_fill_setup_data_cmn(bp, params, rdata); - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ - return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], U64_HI(data_mapping), U64_LO(data_mapping), ETH_CONNECTION_TYPE); @@ -4615,13 +4604,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, bnx2x_q_fill_setup_data_cmn(bp, params, rdata); bnx2x_q_fill_setup_data_e2(bp, params, rdata); - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). 
*/ - return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], U64_HI(data_mapping), U64_LO(data_mapping), ETH_CONNECTION_TYPE); @@ -4659,13 +4647,12 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, o->cids[cid_index], rdata->general.client_id, rdata->general.sp_client_id, rdata->general.cos); - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ - return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], U64_HI(data_mapping), U64_LO(data_mapping), ETH_CONNECTION_TYPE); @@ -4760,13 +4747,12 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp, /* Fill the ramrod data */ bnx2x_q_fill_update_data(bp, o, update_params, rdata); - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ - return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, o->cids[cid_index], U64_HI(data_mapping), U64_LO(data_mapping), ETH_CONNECTION_TYPE); @@ -4813,11 +4799,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp, return bnx2x_q_send_update(bp, params); } +static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *obj, + struct bnx2x_queue_update_tpa_params *params, + struct tpa_update_ramrod_data *data) +{ + data->client_id = obj->cl_id; + data->complete_on_both_clients = params->complete_on_both_clients; + data->dont_verify_rings_pause_thr_flg = + params->dont_verify_thr; + data->max_agg_size = cpu_to_le16(params->max_agg_sz); + data->max_sges_for_packet = params->max_sges_pkt; + data->max_tpa_queues = params->max_tpa_queues; + data->sge_buff_size = cpu_to_le16(params->sge_buff_sz); + data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map)); + data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map)); + data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high); + data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low); + data->tpa_mode = params->tpa_mode; + data->update_ipv4 = params->update_ipv4; + data->update_ipv6 = params->update_ipv6; +} + static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp, struct bnx2x_queue_state_params *params) { - /* TODO: Not implemented yet. 
*/ - return -1; + struct bnx2x_queue_sp_obj *o = params->q_obj; + struct tpa_update_ramrod_data *rdata = + (struct tpa_update_ramrod_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + struct bnx2x_queue_update_tpa_params *update_tpa_params = + ¶ms->params.update_tpa; + u16 type; + + /* Clear the ramrod data */ + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata); + + /* Add the function id inside the type, so that sp post function + * doesn't automatically add the PF func-id, this is required + * for operations done by PFs on behalf of their VFs + */ + type = ETH_CONNECTION_TYPE | + ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT); + + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). + */ + return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE, + o->cids[BNX2X_PRIMARY_CID_INDEX], + U64_HI(data_mapping), + U64_LO(data_mapping), type); } static inline int bnx2x_q_send_halt(struct bnx2x *bp, @@ -5647,6 +5684,12 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp, rdata->tx_switch_suspend = switch_update_params->suspend; rdata->echo = SWITCH_UPDATE; + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). + */ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, U64_HI(data_mapping), U64_LO(data_mapping), NONE_CONNECTION_TYPE); @@ -5674,11 +5717,11 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp, rdata->allowed_priorities = afex_update_params->allowed_priorities; rdata->echo = AFEX_UPDATE; - /* No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). */ DP(BNX2X_MSG_SP, "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n", @@ -5763,6 +5806,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp, rdata->traffic_type_to_priority_cos[i] = tx_start_params->traffic_type_to_priority_cos[i]; + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). 
+ */ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, U64_HI(data_mapping), U64_LO(data_mapping), NONE_CONNECTION_TYPE); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 00d7f214a40a..80f6c790ed88 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -893,6 +893,24 @@ struct bnx2x_queue_update_params { u8 cid_index; }; +struct bnx2x_queue_update_tpa_params { + dma_addr_t sge_map; + u8 update_ipv4; + u8 update_ipv6; + u8 max_tpa_queues; + u8 max_sges_pkt; + u8 complete_on_both_clients; + u8 dont_verify_thr; + u8 tpa_mode; + u8 _pad; + + u16 sge_buff_sz; + u16 max_agg_sz; + + u16 sge_pause_thr_low; + u16 sge_pause_thr_high; +}; + struct rxq_pause_params { u16 bd_th_lo; u16 bd_th_hi; @@ -987,6 +1005,7 @@ struct bnx2x_queue_state_params { /* Params according to the current command */ union { struct bnx2x_queue_update_params update; + struct bnx2x_queue_update_tpa_params update_tpa; struct bnx2x_queue_setup_params setup; struct bnx2x_queue_init_params init; struct bnx2x_queue_setup_tx_only_params tx_only; @@ -1403,6 +1422,4 @@ int bnx2x_config_rss(struct bnx2x *bp, void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, u8 *ind_table); -int validate_vlan_mac(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *vlan_mac); #endif /* BNX2X_SP_VERBS */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index e42f48df6e94..5c523b32db70 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -102,82 +102,22 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, mmiowb(); barrier(); } -/* VFOP - VF slow-path operation support */ -#define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000 +static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp, + struct bnx2x_virtf *vf, + bool print_err) +{ + if (!bnx2x_leading_vfq(vf, sp_initialized)) { + if (print_err) + BNX2X_ERR("Slowpath objects not yet initialized!\n"); + else + DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n"); + return false; + } + return true; +} /* VFOP operations states */ -enum bnx2x_vfop_qctor_state { - BNX2X_VFOP_QCTOR_INIT, - BNX2X_VFOP_QCTOR_SETUP, - BNX2X_VFOP_QCTOR_INT_EN -}; - -enum bnx2x_vfop_qdtor_state { - BNX2X_VFOP_QDTOR_HALT, - BNX2X_VFOP_QDTOR_TERMINATE, - BNX2X_VFOP_QDTOR_CFCDEL, - BNX2X_VFOP_QDTOR_DONE -}; - -enum bnx2x_vfop_vlan_mac_state { - BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE, - BNX2X_VFOP_VLAN_MAC_CLEAR, - BNX2X_VFOP_VLAN_MAC_CHK_DONE, - BNX2X_VFOP_MAC_CONFIG_LIST, - BNX2X_VFOP_VLAN_CONFIG_LIST, - BNX2X_VFOP_VLAN_CONFIG_LIST_0 -}; - -enum bnx2x_vfop_qsetup_state { - BNX2X_VFOP_QSETUP_CTOR, - BNX2X_VFOP_QSETUP_VLAN0, - BNX2X_VFOP_QSETUP_DONE -}; - -enum bnx2x_vfop_mcast_state { - BNX2X_VFOP_MCAST_DEL, - BNX2X_VFOP_MCAST_ADD, - BNX2X_VFOP_MCAST_CHK_DONE -}; -enum bnx2x_vfop_qflr_state { - BNX2X_VFOP_QFLR_CLR_VLAN, - BNX2X_VFOP_QFLR_CLR_MAC, - BNX2X_VFOP_QFLR_TERMINATE, - BNX2X_VFOP_QFLR_DONE -}; - -enum bnx2x_vfop_flr_state { - BNX2X_VFOP_FLR_QUEUES, - BNX2X_VFOP_FLR_HW -}; - -enum bnx2x_vfop_close_state { - BNX2X_VFOP_CLOSE_QUEUES, - BNX2X_VFOP_CLOSE_HW -}; - -enum bnx2x_vfop_rxmode_state { - BNX2X_VFOP_RXMODE_CONFIG, - BNX2X_VFOP_RXMODE_DONE -}; - -enum bnx2x_vfop_qteardown_state { - BNX2X_VFOP_QTEARDOWN_RXMODE, - BNX2X_VFOP_QTEARDOWN_CLR_VLAN, - BNX2X_VFOP_QTEARDOWN_CLR_MAC, - BNX2X_VFOP_QTEARDOWN_CLR_MCAST, - 
BNX2X_VFOP_QTEARDOWN_QDTOR, - BNX2X_VFOP_QTEARDOWN_DONE -}; - -enum bnx2x_vfop_rss_state { - BNX2X_VFOP_RSS_CONFIG, - BNX2X_VFOP_RSS_DONE -}; - -#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0) - void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_queue_init_params *init_params, struct bnx2x_queue_setup_params *setup_params, @@ -221,7 +161,7 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, void bnx2x_vfop_qctor_prep(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q, - struct bnx2x_vfop_qctor_params *p, + struct bnx2x_vf_queue_construct_params *p, unsigned long q_type) { struct bnx2x_queue_init_params *init_p = &p->qstate.params.init; @@ -290,191 +230,85 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp, } } -/* VFOP queue construction */ -static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf) +static int bnx2x_vf_queue_create(struct bnx2x *bp, + struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_queue_construct_params *qctor) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor; - struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate; - enum bnx2x_vfop_qctor_state state = vfop->state; - - bnx2x_vfop_reset_wq(vf); - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - switch (state) { - case BNX2X_VFOP_QCTOR_INIT: - - /* has this queue already been opened? */ - if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == - BNX2X_Q_LOGICAL_STATE_ACTIVE) { - DP(BNX2X_MSG_IOV, - "Entered qctor but queue was already up. Aborting gracefully\n"); - goto op_done; - } - - /* next state */ - vfop->state = BNX2X_VFOP_QCTOR_SETUP; - - q_params->cmd = BNX2X_Q_CMD_INIT; - vfop->rc = bnx2x_queue_state_change(bp, q_params); - - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - - case BNX2X_VFOP_QCTOR_SETUP: - /* next state */ - vfop->state = BNX2X_VFOP_QCTOR_INT_EN; - - /* copy pre-prepared setup params to the queue-state params */ - vfop->op_p->qctor.qstate.params.setup = - vfop->op_p->qctor.prep_qsetup; - - q_params->cmd = BNX2X_Q_CMD_SETUP; - vfop->rc = bnx2x_queue_state_change(bp, q_params); + struct bnx2x_queue_state_params *q_params; + int rc = 0; - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); - case BNX2X_VFOP_QCTOR_INT_EN: + /* Prepare ramrod information */ + q_params = &qctor->qstate; + q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj); + set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags); - /* enable interrupts */ - bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx), - USTORM_ID, 0, IGU_INT_ENABLE, 0); - goto op_done; - default: - bnx2x_vfop_default(state); + if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == + BNX2X_Q_LOGICAL_STATE_ACTIVE) { + DP(BNX2X_MSG_IOV, "queue was already up. 
Aborting gracefully\n"); + goto out; } -op_err: - BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n", - vf->abs_vfid, args->qid, q_params->cmd, vfop->rc); -op_done: - bnx2x_vfop_end(bp, vf, vfop); -op_pending: - return; -} -static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - - if (vfop) { - vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + /* Run Queue 'construction' ramrods */ + q_params->cmd = BNX2X_Q_CMD_INIT; + rc = bnx2x_queue_state_change(bp, q_params); + if (rc) + goto out; - vfop->args.qctor.qid = qid; - vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx); + memcpy(&q_params->params.setup, &qctor->prep_qsetup, + sizeof(struct bnx2x_queue_setup_params)); + q_params->cmd = BNX2X_Q_CMD_SETUP; + rc = bnx2x_queue_state_change(bp, q_params); + if (rc) + goto out; - bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT, - bnx2x_vfop_qctor, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor, - cmd->block); - } - return -ENOMEM; + /* enable interrupts */ + bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)), + USTORM_ID, 0, IGU_INT_ENABLE, 0); +out: + return rc; } -/* VFOP queue destruction */ -static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf) +static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor; - struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate; - enum bnx2x_vfop_qdtor_state state = vfop->state; - - bnx2x_vfop_reset_wq(vf); - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - switch (state) { - case BNX2X_VFOP_QDTOR_HALT: - - /* has this queue already been stopped? */ - if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == - BNX2X_Q_LOGICAL_STATE_STOPPED) { - DP(BNX2X_MSG_IOV, - "Entered qdtor but queue was already stopped. Aborting gracefully\n"); - - /* next state */ - vfop->state = BNX2X_VFOP_QDTOR_DONE; - - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - } - - /* next state */ - vfop->state = BNX2X_VFOP_QDTOR_TERMINATE; - - q_params->cmd = BNX2X_Q_CMD_HALT; - vfop->rc = bnx2x_queue_state_change(bp, q_params); - - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - - case BNX2X_VFOP_QDTOR_TERMINATE: - /* next state */ - vfop->state = BNX2X_VFOP_QDTOR_CFCDEL; - - q_params->cmd = BNX2X_Q_CMD_TERMINATE; - vfop->rc = bnx2x_queue_state_change(bp, q_params); + enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT, + BNX2X_Q_CMD_TERMINATE, + BNX2X_Q_CMD_CFC_DEL}; + struct bnx2x_queue_state_params q_params; + int rc, i; - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); - case BNX2X_VFOP_QDTOR_CFCDEL: - /* next state */ - vfop->state = BNX2X_VFOP_QDTOR_DONE; + /* Prepare ramrod information */ + memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params)); + q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); - q_params->cmd = BNX2X_Q_CMD_CFC_DEL; - vfop->rc = bnx2x_queue_state_change(bp, q_params); + if (bnx2x_get_q_logical_state(bp, q_params.q_obj) == + BNX2X_Q_LOGICAL_STATE_STOPPED) { + DP(BNX2X_MSG_IOV, "queue was already stopped. 
Aborting gracefully\n"); + goto out; + } - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); -op_err: - BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n", - vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc); -op_done: - case BNX2X_VFOP_QDTOR_DONE: - /* invalidate the context */ - if (qdtor->cxt) { - qdtor->cxt->ustorm_ag_context.cdu_usage = 0; - qdtor->cxt->xstorm_ag_context.cdu_reserved = 0; + /* Run Queue 'destruction' ramrods */ + for (i = 0; i < ARRAY_SIZE(cmds); i++) { + q_params.cmd = cmds[i]; + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]); + return rc; } - bnx2x_vfop_end(bp, vf, vfop); - return; - default: - bnx2x_vfop_default(state); } -op_pending: - return; -} - -static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - - if (vfop) { - struct bnx2x_queue_state_params *qstate = - &vf->op_params.qctor.qstate; - - memset(qstate, 0, sizeof(*qstate)); - qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj); - - vfop->args.qdtor.qid = qid; - vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt); - - bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT, - bnx2x_vfop_qdtor, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor, - cmd->block); - } else { - BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid); - return -ENOMEM; +out: + /* Clean Context */ + if (bnx2x_vfq(vf, qid, cxt)) { + bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0; + bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0; } + + return 0; } static void @@ -496,751 +330,293 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) BP_VFDB(bp)->vf_sbs_pool++; } -/* VFOP MAC/VLAN helpers */ -static inline void bnx2x_vfop_credit(struct bnx2x *bp, - struct bnx2x_vfop *vfop, - struct bnx2x_vlan_mac_obj *obj) +static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *obj, + atomic_t *counter) { - struct bnx2x_vfop_args_filters *args = &vfop->args.filters; - - /* update credit only if there is no error - * and a valid credit counter - */ - if (!vfop->rc && args->credit) { - struct list_head *pos; - int read_lock; - int cnt = 0; - - read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); - if (read_lock) - DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); + struct list_head *pos; + int read_lock; + int cnt = 0; - list_for_each(pos, &obj->head) - cnt++; + read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); + if (read_lock) + DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); - if (!read_lock) - bnx2x_vlan_mac_h_read_unlock(bp, obj); + list_for_each(pos, &obj->head) + cnt++; - atomic_set(args->credit, cnt); - } -} - -static int bnx2x_vfop_set_user_req(struct bnx2x *bp, - struct bnx2x_vfop_filter *pos, - struct bnx2x_vlan_mac_data *user_req) -{ - user_req->cmd = pos->add ? 
BNX2X_VLAN_MAC_ADD : - BNX2X_VLAN_MAC_DEL; + if (!read_lock) + bnx2x_vlan_mac_h_read_unlock(bp, obj); - switch (pos->type) { - case BNX2X_VFOP_FILTER_MAC: - memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN); - break; - case BNX2X_VFOP_FILTER_VLAN: - user_req->u.vlan.vlan = pos->vid; - break; - default: - BNX2X_ERR("Invalid filter type, skipping\n"); - return 1; - } - return 0; + atomic_set(counter, cnt); } -static int bnx2x_vfop_config_list(struct bnx2x *bp, - struct bnx2x_vfop_filters *filters, - struct bnx2x_vlan_mac_ramrod_params *vlan_mac) +static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid, bool drv_only, bool mac) { - struct bnx2x_vfop_filter *pos, *tmp; - struct list_head rollback_list, *filters_list = &filters->head; - struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req; - int rc = 0, cnt = 0; - - INIT_LIST_HEAD(&rollback_list); - - list_for_each_entry_safe(pos, tmp, filters_list, link) { - if (bnx2x_vfop_set_user_req(bp, pos, user_req)) - continue; + struct bnx2x_vlan_mac_ramrod_params ramrod; + int rc; - rc = bnx2x_config_vlan_mac(bp, vlan_mac); - if (rc >= 0) { - cnt += pos->add ? 1 : -1; - list_move(&pos->link, &rollback_list); - rc = 0; - } else if (rc == -EEXIST) { - rc = 0; - } else { - BNX2X_ERR("Failed to add a new vlan_mac command\n"); - break; - } - } + DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid, + mac ? "MACs" : "VLANs"); - /* rollback if error or too many rules added */ - if (rc || cnt > filters->add_cnt) { - BNX2X_ERR("error or too many rules added. Performing rollback\n"); - list_for_each_entry_safe(pos, tmp, &rollback_list, link) { - pos->add = !pos->add; /* reverse op */ - bnx2x_vfop_set_user_req(bp, pos, user_req); - bnx2x_config_vlan_mac(bp, vlan_mac); - list_del(&pos->link); - } - cnt = 0; - if (!rc) - rc = -EINVAL; + /* Prepare ramrod params */ + memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); + if (mac) { + set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); + } else { + set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); } - filters->add_cnt = cnt; - return rc; -} - -/* VFOP set VLAN/MAC */ -static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac; - struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj; - struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter; - - enum bnx2x_vfop_vlan_mac_state state = vfop->state; - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - bnx2x_vfop_reset_wq(vf); - - switch (state) { - case BNX2X_VFOP_VLAN_MAC_CLEAR: - /* next state */ - vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - - /* do delete */ - vfop->rc = obj->delete_all(bp, obj, - &vlan_mac->user_req.vlan_mac_flags, - &vlan_mac->ramrod_flags); - - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - - case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE: - /* next state */ - vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - - /* do config */ - vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); - if (vfop->rc == -EEXIST) - vfop->rc = 0; + ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL; - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - - case BNX2X_VFOP_VLAN_MAC_CHK_DONE: - vfop->rc = !!obj->raw.check_pending(&obj->raw); - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); - - case 
BNX2X_VFOP_MAC_CONFIG_LIST: - /* next state */ - vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - - /* do list config */ - vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); - if (vfop->rc) - goto op_err; - - set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); - vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - - case BNX2X_VFOP_VLAN_CONFIG_LIST: - /* next state */ - vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - - /* do list config */ - vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); - if (!vfop->rc) { - set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); - vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); - } - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); + set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); + if (drv_only) + set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); + else + set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); - default: - bnx2x_vfop_default(state); + /* Start deleting */ + rc = ramrod.vlan_mac_obj->delete_all(bp, + ramrod.vlan_mac_obj, + &ramrod.user_req.vlan_mac_flags, + &ramrod.ramrod_flags); + if (rc) { + BNX2X_ERR("Failed to delete all %s\n", + mac ? "MACs" : "VLANs"); + return rc; } -op_err: - BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc); -op_done: - kfree(filters); - bnx2x_vfop_credit(bp, vfop, obj); - bnx2x_vfop_end(bp, vf, vfop); -op_pending: - return; -} - -struct bnx2x_vfop_vlan_mac_flags { - bool drv_only; - bool dont_consume; - bool single_cmd; - bool add; -}; - -static void -bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod, - struct bnx2x_vfop_vlan_mac_flags *flags) -{ - struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req; - - memset(ramrod, 0, sizeof(*ramrod)); - /* ramrod flags */ - if (flags->drv_only) - set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags); - if (flags->single_cmd) - set_bit(RAMROD_EXEC, &ramrod->ramrod_flags); + /* Clear the vlan counters */ + if (!mac) + atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0); - /* mac_vlan flags */ - if (flags->dont_consume) - set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags); - - /* cmd */ - ureq->cmd = flags->add ? 
BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL; -} - -static inline void -bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod, - struct bnx2x_vfop_vlan_mac_flags *flags) -{ - bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags); - set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags); + return 0; } -static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid, bool drv_only) +static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, + struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_mac_vlan_filter *filter, + bool drv_only) { - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + struct bnx2x_vlan_mac_ramrod_params ramrod; int rc; - if (vfop) { - struct bnx2x_vfop_args_filters filters = { - .multi_filter = NULL, /* single */ - .credit = NULL, /* consume credit */ - }; - struct bnx2x_vfop_vlan_mac_flags flags = { - .drv_only = drv_only, - .dont_consume = (filters.credit != NULL), - .single_cmd = true, - .add = false /* don't care */, - }; - struct bnx2x_vlan_mac_ramrod_params *ramrod = - &vf->op_params.vlan_mac; - - /* set ramrod params */ - bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); - - /* set object */ - rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)); - if (rc) - return rc; - ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); - - /* set extra args */ - vfop->args.filters = filters; - - bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR, - bnx2x_vfop_vlan_mac, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, - cmd->block); + DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n", + vf->abs_vfid, filter->add ? "Adding" : "Deleting", + filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN"); + + /* Prepare ramrod params */ + memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); + if (filter->type == BNX2X_VF_FILTER_VLAN) { + set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); + ramrod.user_req.u.vlan.vlan = filter->vid; + } else { + set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); + memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN); + } + ramrod.user_req.cmd = filter->add ? 
BNX2X_VLAN_MAC_ADD : + BNX2X_VLAN_MAC_DEL; + + /* Verify there are available vlan credits */ + if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && + (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= + vf_vlan_rules_cnt(vf))) { + BNX2X_ERR("No credits for vlan\n"); + return -ENOMEM; } - return -ENOMEM; -} - -int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - struct bnx2x_vfop_filters *macs, - int qid, bool drv_only) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - int rc; - if (vfop) { - struct bnx2x_vfop_args_filters filters = { - .multi_filter = macs, - .credit = NULL, /* consume credit */ - }; - struct bnx2x_vfop_vlan_mac_flags flags = { - .drv_only = drv_only, - .dont_consume = (filters.credit != NULL), - .single_cmd = false, - .add = false, /* don't care since only the items in the - * filters list affect the sp operation, - * not the list itself - */ - }; - struct bnx2x_vlan_mac_ramrod_params *ramrod = - &vf->op_params.vlan_mac; - - /* set ramrod params */ - bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); - - /* set object */ - rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)); - if (rc) - return rc; - ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); + set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); + if (drv_only) + set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); + else + set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); + + /* Add/Remove the filter */ + rc = bnx2x_config_vlan_mac(bp, &ramrod); + if (rc && rc != -EEXIST) { + BNX2X_ERR("Failed to %s %s\n", + filter->add ? "add" : "delete", + filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : + "VLAN"); + return rc; + } - /* set extra args */ - filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX; - vfop->args.filters = filters; + /* Update the vlan counters */ + if (filter->type == BNX2X_VF_FILTER_VLAN) + bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, + &bnx2x_vfq(vf, qid, vlan_count)); - bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST, - bnx2x_vfop_vlan_mac, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, - cmd->block); - } - return -ENOMEM; + return 0; } -static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid, u16 vid, bool add) +int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mac_vlan_filters *filters, + int qid, bool drv_only) { - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - int rc; + int rc = 0, i; - if (vfop) { - struct bnx2x_vfop_args_filters filters = { - .multi_filter = NULL, /* single command */ - .credit = &bnx2x_vfq(vf, qid, vlan_count), - }; - struct bnx2x_vfop_vlan_mac_flags flags = { - .drv_only = false, - .dont_consume = (filters.credit != NULL), - .single_cmd = true, - .add = add, - }; - struct bnx2x_vlan_mac_ramrod_params *ramrod = - &vf->op_params.vlan_mac; - - /* set ramrod params */ - bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); - ramrod->user_req.u.vlan.vlan = vid; - - /* set object */ - rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)); - if (rc) - return rc; - ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); - /* set extra args */ - vfop->args.filters = filters; + if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) + return -EINVAL; - bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE, - bnx2x_vfop_vlan_mac, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, - cmd->block); + /* Prepare ramrod params */ + for (i = 0; i < 
filters->count; i++) {
+		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
+					      &filters->filters[i], drv_only);
+		if (rc)
+			break;
 	}
-	return -ENOMEM;
-}
-
-static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
-				      struct bnx2x_virtf *vf,
-				      struct bnx2x_vfop_cmd *cmd,
-				      int qid, bool drv_only)
-{
-	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-	int rc;
 
-	if (vfop) {
-		struct bnx2x_vfop_args_filters filters = {
-			.multi_filter = NULL, /* single command */
-			.credit = &bnx2x_vfq(vf, qid, vlan_count),
-		};
-		struct bnx2x_vfop_vlan_mac_flags flags = {
-			.drv_only = drv_only,
-			.dont_consume = (filters.credit != NULL),
-			.single_cmd = true,
-			.add = false, /* don't care */
-		};
-		struct bnx2x_vlan_mac_ramrod_params *ramrod =
-			&vf->op_params.vlan_mac;
-
-		/* set ramrod params */
-		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
-
-		/* set object */
-		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
-		if (rc)
-			return rc;
-		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+	/* Rollback if needed */
+	if (i != filters->count) {
+		BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
+			  i, filters->count);
+		while (--i >= 0) {
+			filters->filters[i].add = !filters->filters[i].add;
+			bnx2x_vf_mac_vlan_config(bp, vf, qid,
+						 &filters->filters[i],
+						 drv_only);
+		}
+	}
 
-		/* set extra args */
-		vfop->args.filters = filters;
+	/* It's our responsibility to free the filters */
+	kfree(filters);
 
-		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
-				 bnx2x_vfop_vlan_mac, cmd->done);
-		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
-					     cmd->block);
-	}
-	return -ENOMEM;
+	return rc;
 }
 
-int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
-			     struct bnx2x_virtf *vf,
-			     struct bnx2x_vfop_cmd *cmd,
-			     struct bnx2x_vfop_filters *vlans,
-			     int qid, bool drv_only)
+int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
+			 struct bnx2x_vf_queue_construct_params *qctor)
 {
-	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
 	int rc;
 
-	if (vfop) {
-		struct bnx2x_vfop_args_filters filters = {
-			.multi_filter = vlans,
-			.credit = &bnx2x_vfq(vf, qid, vlan_count),
-		};
-		struct bnx2x_vfop_vlan_mac_flags flags = {
-			.drv_only = drv_only,
-			.dont_consume = (filters.credit != NULL),
-			.single_cmd = false,
-			.add = false, /* don't care */
-		};
-		struct bnx2x_vlan_mac_ramrod_params *ramrod =
-			&vf->op_params.vlan_mac;
-
-		/* set ramrod params */
-		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
-
-		/* set object */
-		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
-		if (rc)
-			return rc;
-		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
-
-		/* set extra args */
-		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
-			atomic_read(filters.credit);
-
-		vfop->args.filters = filters;
+	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 
-		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
-				 bnx2x_vfop_vlan_mac, cmd->done);
-		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
-					     cmd->block);
-	}
-	return -ENOMEM;
-}
-
-/* VFOP queue setup (queue constructor + set vlan 0) */
-static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
-	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-	int qid = vfop->args.qctor.qid;
-	enum bnx2x_vfop_qsetup_state state = vfop->state;
-	struct bnx2x_vfop_cmd cmd = {
-		.done = bnx2x_vfop_qsetup,
-		.block = false,
-	};
-
-	if (vfop->rc < 0)
+	rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
+	if (rc)
 		goto op_err;
 
-	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+	/* Configure vlan0 for leading queue */
+	if (!qid) {
+		
struct bnx2x_vf_mac_vlan_filter filter; - switch (state) { - case BNX2X_VFOP_QSETUP_CTOR: - /* init the queue ctor command */ - vfop->state = BNX2X_VFOP_QSETUP_VLAN0; - vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid); - if (vfop->rc) + memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter)); + filter.type = BNX2X_VF_FILTER_VLAN; + filter.add = true; + filter.vid = 0; + rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); + if (rc) goto op_err; - return; - - case BNX2X_VFOP_QSETUP_VLAN0: - /* skip if non-leading or FPGA/EMU*/ - if (qid) - goto op_done; + } - /* init the queue set-vlan command (for vlan 0) */ - vfop->state = BNX2X_VFOP_QSETUP_DONE; - vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true); - if (vfop->rc) - goto op_err; - return; + /* Schedule the configuration of any pending vlan filters */ + vf->cfg_flags |= VF_CFG_VLAN; + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, + BNX2X_MSG_IOV); + return 0; op_err: - BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc); -op_done: - case BNX2X_VFOP_QSETUP_DONE: - vf->cfg_flags |= VF_CFG_VLAN; - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, - &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); - bnx2x_vfop_end(bp, vf, vfop); - return; - default: - bnx2x_vfop_default(state); - } + BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); + return rc; } -int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid) +static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid) { - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; - if (vfop) { - vfop->args.qctor.qid = qid; + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); - bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR, - bnx2x_vfop_qsetup, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup, - cmd->block); + /* If needed, clean the filtering data base */ + if ((qid == LEADING_IDX) && + bnx2x_validate_vf_sp_objs(bp, vf, false)) { + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); + if (rc) + goto op_err; + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); + if (rc) + goto op_err; } - return -ENOMEM; -} - -/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */ -static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - int qid = vfop->args.qx.qid; - enum bnx2x_vfop_qflr_state state = vfop->state; - struct bnx2x_queue_state_params *qstate; - struct bnx2x_vfop_cmd cmd; - - bnx2x_vfop_reset_wq(vf); - - if (vfop->rc < 0) - goto op_err; - DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state); + /* Terminate queue */ + if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) { + struct bnx2x_queue_state_params qstate; - cmd.done = bnx2x_vfop_qflr; - cmd.block = false; - - switch (state) { - case BNX2X_VFOP_QFLR_CLR_VLAN: - /* vlan-clear-all: driver-only, don't consume credit */ - vfop->state = BNX2X_VFOP_QFLR_CLR_MAC; - - if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) { - /* the vlan_mac vfop will re-schedule us */ - vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, - qid, true); - if (vfop->rc) - goto op_err; - return; - - } else { - /* need to reschedule ourselves */ - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - } - - case BNX2X_VFOP_QFLR_CLR_MAC: - /* mac-clear-all: driver only consume credit */ - vfop->state = BNX2X_VFOP_QFLR_TERMINATE; - if 
(!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) { - /* the vlan_mac vfop will re-schedule us */ - vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, - qid, true); - if (vfop->rc) - goto op_err; - return; - - } else { - /* need to reschedule ourselves */ - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - } - - case BNX2X_VFOP_QFLR_TERMINATE: - qstate = &vfop->op_p->qctor.qstate; - memset(qstate , 0, sizeof(*qstate)); - qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj); - vfop->state = BNX2X_VFOP_QFLR_DONE; - - DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n", - vf->abs_vfid, qstate->q_obj->state); - - if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) { - qstate->q_obj->state = BNX2X_Q_STATE_STOPPED; - qstate->cmd = BNX2X_Q_CMD_TERMINATE; - vfop->rc = bnx2x_queue_state_change(bp, qstate); - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND); - } else { - goto op_done; - } - -op_err: - BNX2X_ERR("QFLR[%d:%d] error: rc %d\n", - vf->abs_vfid, qid, vfop->rc); -op_done: - case BNX2X_VFOP_QFLR_DONE: - bnx2x_vfop_end(bp, vf, vfop); - return; - default: - bnx2x_vfop_default(state); + memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); + qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + qstate.q_obj->state = BNX2X_Q_STATE_STOPPED; + qstate.cmd = BNX2X_Q_CMD_TERMINATE; + set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); + rc = bnx2x_queue_state_change(bp, &qstate); + if (rc) + goto op_err; } -op_pending: - return; -} - -static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - vfop->args.qx.qid = qid; - bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN, - bnx2x_vfop_qflr, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr, - cmd->block); - } - return -ENOMEM; + return 0; +op_err: + BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); + return rc; } -/* VFOP multi-casts */ -static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, + bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast; - struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw; - struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list; - enum bnx2x_vfop_mcast_state state = vfop->state; - int i; - - bnx2x_vfop_reset_wq(vf); - - if (vfop->rc < 0) - goto op_err; + struct bnx2x_mcast_list_elem *mc = NULL; + struct bnx2x_mcast_ramrod_params mcast; + int rc, i; - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - switch (state) { - case BNX2X_VFOP_MCAST_DEL: - /* clear existing mcasts */ - vfop->state = (args->mc_num) ? 
BNX2X_VFOP_MCAST_ADD
-					     : BNX2X_VFOP_MCAST_CHK_DONE;
-		mcast->mcast_list_len = vf->mcast_list_len;
-		vf->mcast_list_len = args->mc_num;
-		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
-	case BNX2X_VFOP_MCAST_ADD:
-		if (raw->check_pending(raw))
-			goto op_pending;
-
-		/* update mcast list on the ramrod params */
-		INIT_LIST_HEAD(&mcast->mcast_list);
-		for (i = 0; i < args->mc_num; i++)
-			list_add_tail(&(args->mc[i].link),
-				      &mcast->mcast_list);
-		mcast->mcast_list_len = args->mc_num;
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 
-		/* add new mcasts */
-		vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
-		vfop->rc = bnx2x_config_mcast(bp, mcast,
-					      BNX2X_MCAST_CMD_ADD);
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-
-	case BNX2X_VFOP_MCAST_CHK_DONE:
-		vfop->rc = raw->check_pending(raw) ? 1 : 0;
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-	default:
-		bnx2x_vfop_default(state);
+	/* Prepare Multicast command */
+	memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
+	mcast.mcast_obj = &vf->mcast_obj;
+	if (drv_only)
+		set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
+	else
+		set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
+	if (mc_num) {
+		mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
+			     GFP_KERNEL);
+		if (!mc) {
+			BNX2X_ERR("Cannot configure multicasts due to lack of memory\n");
+			return -ENOMEM;
+		}
 	}
-op_err:
-	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
-op_done:
-	kfree(args->mc);
-	bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
-	return;
-}
 
-int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
-			 struct bnx2x_virtf *vf,
-			 struct bnx2x_vfop_cmd *cmd,
-			 bnx2x_mac_addr_t *mcasts,
-			 int mcast_num, bool drv_only)
-{
-	struct bnx2x_vfop *vfop = NULL;
-	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
-	struct bnx2x_mcast_list_elem *mc = mc_sz ? 
kzalloc(mc_sz, GFP_KERNEL) :
-		NULL;
-
-	if (!mc_sz || mc) {
-		vfop = bnx2x_vfop_add(bp, vf);
-		if (vfop) {
-			int i;
-			struct bnx2x_mcast_ramrod_params *ramrod =
-				&vf->op_params.mcast;
-
-			/* set ramrod params */
-			memset(ramrod, 0, sizeof(*ramrod));
-			ramrod->mcast_obj = &vf->mcast_obj;
-			if (drv_only)
-				set_bit(RAMROD_DRV_CLR_ONLY,
-					&ramrod->ramrod_flags);
-
-			/* copy mcasts pointers */
-			vfop->args.mc_list.mc_num = mcast_num;
-			vfop->args.mc_list.mc = mc;
-			for (i = 0; i < mcast_num; i++)
-				mc[i].mac = mcasts[i];
-
-			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
-					 bnx2x_vfop_mcast, cmd->done);
-			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
-						     cmd->block);
-		} else {
+	/* clear existing mcasts */
+	mcast.mcast_list_len = vf->mcast_list_len;
+	vf->mcast_list_len = mc_num;
+	rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
+	if (rc) {
+		BNX2X_ERR("Failed to remove multicasts\n");
+		if (mc)
 			kfree(mc);
-		}
+		return rc;
 	}
-	return -ENOMEM;
-}
-
-/* VFOP rx-mode */
-static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
-	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
-	enum bnx2x_vfop_rxmode_state state = vfop->state;
-
-	bnx2x_vfop_reset_wq(vf);
-
-	if (vfop->rc < 0)
-		goto op_err;
-
-	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
-	switch (state) {
-	case BNX2X_VFOP_RXMODE_CONFIG:
-		/* next state */
-		vfop->state = BNX2X_VFOP_RXMODE_DONE;
+	/* update mcast list on the ramrod params */
+	if (mc_num) {
+		INIT_LIST_HEAD(&mcast.mcast_list);
+		for (i = 0; i < mc_num; i++) {
+			mc[i].mac = mcasts[i];
+			list_add_tail(&mc[i].link,
+				      &mcast.mcast_list);
+		}
 
-		/* record the accept flags in vfdb so hypervisor can modify them
-		 * if necessary
-		 */
-		bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
-			ramrod->rx_accept_flags;
-		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-op_err:
-	BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
-op_done:
-	case BNX2X_VFOP_RXMODE_DONE:
-		bnx2x_vfop_end(bp, vf, vfop);
-		return;
-	default:
-		bnx2x_vfop_default(state);
+		/* add new mcasts */
+		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
+		if (rc)
+			BNX2X_ERR("Failed to add multicasts\n");
+		kfree(mc);
 	}
-op_pending:
-	return;
+
+	return rc;
 }
 
 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
@@ -1268,118 +644,56 @@ static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
 	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
 }
 
-int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
-			  struct bnx2x_virtf *vf,
-			  struct bnx2x_vfop_cmd *cmd,
-			  int qid, unsigned long accept_flags)
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+		    int qid, unsigned long accept_flags)
 {
-	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
-	if (vfop) {
-		struct bnx2x_rx_mode_ramrod_params *ramrod =
-			&vf->op_params.rx_mode;
+	struct bnx2x_rx_mode_ramrod_params ramrod;
 
-		bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 
-		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
-				 bnx2x_vfop_rxmode, cmd->done);
-		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
-					     cmd->block);
-	}
-	return -ENOMEM;
+	bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
+	set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+	vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
+	return bnx2x_config_rx_mode(bp, &ramrod);
}
 
-/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
- * 
queue destructor) - */ -static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - int qid = vfop->args.qx.qid; - enum bnx2x_vfop_qteardown_state state = vfop->state; - struct bnx2x_vfop_cmd cmd; - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - cmd.done = bnx2x_vfop_qdown; - cmd.block = false; - - switch (state) { - case BNX2X_VFOP_QTEARDOWN_RXMODE: - /* Drop all */ - vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN; - vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0); - if (vfop->rc) - goto op_err; - return; - - case BNX2X_VFOP_QTEARDOWN_CLR_VLAN: - /* vlan-clear-all: don't consume credit */ - vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC; - vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false); - if (vfop->rc) - goto op_err; - return; - - case BNX2X_VFOP_QTEARDOWN_CLR_MAC: - /* mac-clear-all: consume credit */ - vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST; - vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false); - if (vfop->rc) - goto op_err; - return; + int rc; - case BNX2X_VFOP_QTEARDOWN_CLR_MCAST: - vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR; - vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false); - if (vfop->rc) - goto op_err; - return; + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); - case BNX2X_VFOP_QTEARDOWN_QDTOR: - /* run the queue destruction flow */ - DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n"); - vfop->state = BNX2X_VFOP_QTEARDOWN_DONE; - DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n"); - vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid); - DP(BNX2X_MSG_IOV, "returned from cmd\n"); - if (vfop->rc) + /* Remove all classification configuration for leading queue */ + if (qid == LEADING_IDX) { + rc = bnx2x_vf_rxmode(bp, vf, qid, 0); + if (rc) goto op_err; - return; -op_err: - BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n", - vf->abs_vfid, qid, vfop->rc); - - case BNX2X_VFOP_QTEARDOWN_DONE: - bnx2x_vfop_end(bp, vf, vfop); - return; - default: - bnx2x_vfop_default(state); - } -} -int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - - /* for non leading queues skip directly to qdown sate */ - if (vfop) { - vfop->args.qx.qid = qid; - bnx2x_vfop_opset(qid == LEADING_IDX ? 
- BNX2X_VFOP_QTEARDOWN_RXMODE : - BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown, - cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown, - cmd->block); + /* Remove filtering if feasible */ + if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, + false, false); + if (rc) + goto op_err; + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, + false, true); + if (rc) + goto op_err; + rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); + if (rc) + goto op_err; + } } - return -ENOMEM; + /* Destroy queue */ + rc = bnx2x_vf_queue_destroy(bp, vf, qid); + if (rc) + goto op_err; + return rc; +op_err: + BNX2X_ERR("vf[%d:%d] error: rc %d\n", + vf->abs_vfid, qid, rc); + return rc; } /* VF enable primitives @@ -1579,120 +893,63 @@ static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_tx_hw_flushed(bp, poll_cnt); } -static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) +static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; - enum bnx2x_vfop_flr_state state = vfop->state; - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vfop_flr, - .block = false, - }; - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + int rc, i; - switch (state) { - case BNX2X_VFOP_FLR_QUEUES: - /* the cleanup operations are valid if and only if the VF - * was first acquired. - */ - if (++(qx->qid) < vf_rxq_count(vf)) { - vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd, - qx->qid); - if (vfop->rc) - goto op_err; - return; - } - /* remove multicasts */ - vfop->state = BNX2X_VFOP_FLR_HW; - vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, - 0, true); - if (vfop->rc) - goto op_err; - return; - case BNX2X_VFOP_FLR_HW: + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); - /* dispatch final cleanup and wait for HW queues to flush */ - bnx2x_vf_flr_clnup_hw(bp, vf); + /* the cleanup operations are valid if and only if the VF + * was first acquired. + */ + for (i = 0; i < vf_rxq_count(vf); i++) { + rc = bnx2x_vf_queue_flr(bp, vf, i); + if (rc) + goto out; + } - /* release VF resources */ - bnx2x_vf_free_resc(bp, vf); + /* remove multicasts */ + bnx2x_vf_mcast(bp, vf, NULL, 0, true); - /* re-open the mailbox */ - bnx2x_vf_enable_mbx(bp, vf->abs_vfid); + /* dispatch final cleanup and wait for HW queues to flush */ + bnx2x_vf_flr_clnup_hw(bp, vf); - goto op_done; - default: - bnx2x_vfop_default(state); - } -op_err: - BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc); -op_done: - vf->flr_clnup_stage = VF_FLR_ACK; - bnx2x_vfop_end(bp, vf, vfop); - bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); -} + /* release VF resources */ + bnx2x_vf_free_resc(bp, vf); -static int bnx2x_vfop_flr_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - vfop_handler_t done) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - vfop->args.qx.qid = -1; /* loop */ - bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES, - bnx2x_vfop_flr, done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false); - } - return -ENOMEM; + /* re-open the mailbox */ + bnx2x_vf_enable_mbx(bp, vf->abs_vfid); + return; +out: + BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n", + vf->abs_vfid, i, rc); } -static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf) +static void bnx2x_vf_flr_clnup(struct bnx2x *bp) { - int i = prev_vf ? 
prev_vf->index + 1 : 0; struct bnx2x_virtf *vf; + int i; - /* find next VF to cleanup */ -next_vf_to_clean: - for (; - i < BNX2X_NR_VIRTFN(bp) && - (bnx2x_vf(bp, i, state) != VF_RESET || - bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN); - i++) - ; + for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) { + /* VF should be RESET & in FLR cleanup states */ + if (bnx2x_vf(bp, i, state) != VF_RESET || + !bnx2x_vf(bp, i, flr_clnup_stage)) + continue; - DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i, - BNX2X_NR_VIRTFN(bp)); + DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", + i, BNX2X_NR_VIRTFN(bp)); - if (i < BNX2X_NR_VIRTFN(bp)) { vf = BP_VF(bp, i); /* lock the vf pf channel */ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); /* invoke the VF FLR SM */ - if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) { - BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n", - vf->abs_vfid); + bnx2x_vf_flr(bp, vf); - /* mark the VF to be ACKED and continue */ - vf->flr_clnup_stage = VF_FLR_ACK; - goto next_vf_to_clean; - } - return; - } - - /* we are done, update vf records */ - for_each_vf(bp, i) { - vf = BP_VF(bp, i); - - if (vf->flr_clnup_stage != VF_FLR_ACK) - continue; - - vf->flr_clnup_stage = VF_FLR_EPILOG; + /* mark the VF to be ACKED and continue */ + vf->flr_clnup_stage = false; + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); } /* Acknowledge the handled VFs. @@ -1742,7 +999,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp) if (reset) { /* set as reset and ready for cleanup */ vf->state = VF_RESET; - vf->flr_clnup_stage = VF_FLR_CLN; + vf->flr_clnup_stage = true; DP(BNX2X_MSG_IOV, "Initiating Final cleanup for VF %d\n", @@ -1751,7 +1008,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp) } /* do the FLR cleanup for all marked VFs*/ - bnx2x_vf_flr_clnup(bp, NULL); + bnx2x_vf_flr_clnup(bp); } /* IOV global initialization routines */ @@ -2018,7 +1275,6 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, bnx2x_vf(bp, i, index) = i; bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; bnx2x_vf(bp, i, state) = VF_FREE; - INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head)); mutex_init(&bnx2x_vf(bp, i, op_mutex)); bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; } @@ -2039,6 +1295,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, goto failed; } + /* Prepare the VFs event synchronization mechanism */ + mutex_init(&bp->vfdb->event_mutex); + return 0; failed: DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); @@ -2117,7 +1376,9 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp) cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); if (cxt->size) { - BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size); + cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size); + if (!cxt->addr) + goto alloc_mem_err; } else { cxt->addr = NULL; cxt->mapping = 0; @@ -2127,20 +1388,28 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp) /* allocate vfs ramrods dma memory - client_init and set_mac */ tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); - BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping, - tot_size); + BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping, + tot_size); + if (!BP_VFDB(bp)->sp_dma.addr) + goto alloc_mem_err; BP_VFDB(bp)->sp_dma.size = tot_size; /* allocate mailboxes */ tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; - BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping, - tot_size); + BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping, + tot_size); + if (!BP_VF_MBX_DMA(bp)->addr) + 
goto alloc_mem_err; + BP_VF_MBX_DMA(bp)->size = tot_size; /* allocate local bulletin boards */ tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; - BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr, - &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size); + BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping, + tot_size); + if (!BP_VF_BULLETIN_DMA(bp)->addr) + goto alloc_mem_err; + BP_VF_BULLETIN_DMA(bp)->size = tot_size; return 0; @@ -2166,6 +1435,9 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_sp_map(bp, vf, q_data), q_type); + /* sp indication is set only when vlan/mac/etc. are initialized */ + q->sp_initialized = false; + DP(BNX2X_MSG_IOV, "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n", vf->abs_vfid, q->sp_obj.func_id, q->cid); @@ -2269,7 +1541,7 @@ int bnx2x_iov_chip_cleanup(struct bnx2x *bp) /* release all the VFs */ for_each_vf(bp, i) - bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */ + bnx2x_vf_release(bp, BP_VF(bp, i)); return 0; } @@ -2359,6 +1631,12 @@ void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, smp_mb__after_clear_bit(); } +static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw); +} + int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) { struct bnx2x_virtf *vf; @@ -2383,6 +1661,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) case EVENT_RING_OPCODE_CLASSIFICATION_RULES: case EVENT_RING_OPCODE_MULTICAST_RULES: case EVENT_RING_OPCODE_FILTERS_RULES: + case EVENT_RING_OPCODE_RSS_UPDATE_RULES: cid = (elem->message.data.eth_event.echo & BNX2X_SWCID_MASK); DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); @@ -2447,13 +1726,15 @@ get_vf: vf->abs_vfid, qidx); bnx2x_vf_handle_filters_eqe(bp, vf); break; + case EVENT_RING_OPCODE_RSS_UPDATE_RULES: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", + vf->abs_vfid, qidx); + bnx2x_vf_handle_rss_update_eqe(bp, vf); case EVENT_RING_OPCODE_VF_FLR: case EVENT_RING_OPCODE_MALICIOUS_VF: /* Do nothing for now */ return 0; } - /* SRIOV: reschedule any 'in_progress' operations */ - bnx2x_iov_sp_event(bp, cid, false); return 0; } @@ -2490,23 +1771,6 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, } } -void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work) -{ - struct bnx2x_virtf *vf; - - /* check if the cid is the VF range */ - if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid)) - return; - - vf = bnx2x_vf_by_cid(bp, vf_cid); - if (vf) { - /* set in_progress flag */ - atomic_set(&vf->op_in_progress, 1); - if (queue_work) - queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); - } -} - void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) { int i; @@ -2527,10 +1791,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - (is_fcoe ? 0 : 1); - DP(BNX2X_MSG_IOV, - "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n", - BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, - first_queue_query_index + num_queues_req); + DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), + "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. 
Will add queries on top of that\n", + BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, + first_queue_query_index + num_queues_req); cur_data_offset = bp->fw_stats_data_mapping + offsetof(struct bnx2x_fw_stats_data, queue_stats) + @@ -2544,9 +1808,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) struct bnx2x_virtf *vf = BP_VF(bp, i); if (vf->state != VF_ENABLED) { - DP(BNX2X_MSG_IOV, - "vf %d not enabled so no stats for it\n", - vf->abs_vfid); + DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), + "vf %d not enabled so no stats for it\n", + vf->abs_vfid); continue; } @@ -2588,32 +1852,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; } -void bnx2x_iov_sp_task(struct bnx2x *bp) -{ - int i; - - if (!IS_SRIOV(bp)) - return; - /* Iterate over all VFs and invoke state transition for VFs with - * 'in-progress' slow-path operations - */ - DP(BNX2X_MSG_IOV, "searching for pending vf operations\n"); - for_each_vf(bp, i) { - struct bnx2x_virtf *vf = BP_VF(bp, i); - - if (!vf) { - BNX2X_ERR("VF was null! skipping...\n"); - continue; - } - - if (!list_empty(&vf->op_list_head) && - atomic_read(&vf->op_in_progress)) { - DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i); - bnx2x_vfop_cur(bp, vf)->transition(bp, vf); - } - } -} - static inline struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) { @@ -2849,52 +2087,26 @@ static void bnx2x_set_vf_state(void *cookie) p->vf->state = p->state; } -/* VFOP close (teardown the queues, delete mcasts and close HW) */ -static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; - enum bnx2x_vfop_close_state state = vfop->state; - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vfop_close, - .block = false, - }; + int rc = 0, i; - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - switch (state) { - case BNX2X_VFOP_CLOSE_QUEUES: - - if (++(qx->qid) < vf_rxq_count(vf)) { - vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid); - if (vfop->rc) - goto op_err; - return; - } - vfop->state = BNX2X_VFOP_CLOSE_HW; - vfop->rc = 0; - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); - case BNX2X_VFOP_CLOSE_HW: - - /* disable the interrupts */ - DP(BNX2X_MSG_IOV, "disabling igu\n"); - bnx2x_vf_igu_disable(bp, vf); + /* Close all queues */ + for (i = 0; i < vf_rxq_count(vf); i++) { + rc = bnx2x_vf_queue_teardown(bp, vf, i); + if (rc) + goto op_err; + } - /* disable the VF */ - DP(BNX2X_MSG_IOV, "clearing qtbl\n"); - bnx2x_vf_clr_qtbl(bp, vf); + /* disable the interrupts */ + DP(BNX2X_MSG_IOV, "disabling igu\n"); + bnx2x_vf_igu_disable(bp, vf); - goto op_done; - default: - bnx2x_vfop_default(state); - } -op_err: - BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); -op_done: + /* disable the VF */ + DP(BNX2X_MSG_IOV, "clearing qtbl\n"); + bnx2x_vf_clr_qtbl(bp, vf); /* need to make sure there are no outstanding stats ramrods which may * cause the device to access the VF's stats buffer which it will free @@ -2909,43 +2121,20 @@ op_done: } DP(BNX2X_MSG_IOV, "set state to acquired\n"); - bnx2x_vfop_end(bp, vf, vfop); -op_pending: - /* Not supported at the moment; Exists for macros only */ - return; -} -int bnx2x_vfop_close_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd) -{ - 
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - vfop->args.qx.qid = -1; /* loop */ - bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES, - bnx2x_vfop_close, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close, - cmd->block); - } - return -ENOMEM; + return 0; +op_err: + BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc); + return rc; } /* VF release can be called either: 1. The VF was acquired but * not enabled 2. the vf was enabled or in the process of being * enabled */ -static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vfop_release, - .block = false, - }; - - DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); - - if (vfop->rc < 0) - goto op_err; + int rc; DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, vf->state == VF_FREE ? "Free" : @@ -2956,116 +2145,87 @@ static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) switch (vf->state) { case VF_ENABLED: - vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); - if (vfop->rc) + rc = bnx2x_vf_close(bp, vf); + if (rc) goto op_err; - return; - + /* Fallthrough to release resources */ case VF_ACQUIRED: DP(BNX2X_MSG_IOV, "about to free resources\n"); bnx2x_vf_free_resc(bp, vf); - DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); - goto op_done; + break; case VF_FREE: case VF_RESET: - /* do nothing */ - goto op_done; default: - bnx2x_vfop_default(vf->state); + break; } + return 0; op_err: - BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc); -op_done: - bnx2x_vfop_end(bp, vf, vfop); + BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc); + return rc; } -static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_config_rss_params *rss) { - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - enum bnx2x_vfop_rss_state state; - - if (!vfop) { - BNX2X_ERR("vfop was null\n"); - return; - } - - state = vfop->state; - bnx2x_vfop_reset_wq(vf); - - if (vfop->rc < 0) - goto op_err; - - DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - - switch (state) { - case BNX2X_VFOP_RSS_CONFIG: - /* next state */ - vfop->state = BNX2X_VFOP_RSS_DONE; - bnx2x_config_rss(bp, &vfop->op_p->rss); - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); -op_err: - BNX2X_ERR("RSS error: rc %d\n", vfop->rc); -op_done: - case BNX2X_VFOP_RSS_DONE: - bnx2x_vfop_end(bp, vf, vfop); - return; - default: - bnx2x_vfop_default(state); - } -op_pending: - return; + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags); + return bnx2x_config_rss(bp, rss); } -int bnx2x_vfop_release_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd) +int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct vfpf_tpa_tlv *tlv, + struct bnx2x_queue_update_tpa_params *params) { - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - bnx2x_vfop_opset(-1, /* use vf->state */ - bnx2x_vfop_release, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release, - cmd->block); - } - return -ENOMEM; -} + aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr; + struct bnx2x_queue_state_params qstate; + int qid, rc = 0; -int bnx2x_vfop_rss_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + 
DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + + /* Set ramrod params */ + memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); + memcpy(&qstate.params.update_tpa, params, + sizeof(struct bnx2x_queue_update_tpa_params)); + qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA; + set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); - if (vfop) { - bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss, - cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss, - cmd->block); + for (qid = 0; qid < vf_rxq_count(vf); qid++) { + qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + qstate.params.update_tpa.sge_map = sge_addr[qid]; + DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n", + vf->abs_vfid, qid, U64_HI(sge_addr[qid]), + U64_LO(sge_addr[qid])); + rc = bnx2x_queue_state_change(bp, &qstate); + if (rc) { + BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n", + U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]), + vf->abs_vfid, qid); + return rc; + } } - return -ENOMEM; + + return rc; } /* VF release ~ VF close + VF release-resources * Release is the ultimate SW shutdown and is called whenever an * irrecoverable error is encountered. */ -void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block) +int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) { - struct bnx2x_vfop_cmd cmd = { - .done = NULL, - .block = block, - }; int rc; DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); - rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); + rc = bnx2x_vf_free(bp, vf); if (rc) WARN(rc, "VF[%d] Failed to allocate resources for release op- rc=%d\n", vf->abs_vfid, rc); + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); + return rc; } static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, @@ -3074,16 +2234,6 @@ static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, *sbdf = vf->devfn | (vf->bus << 8); } -static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf, - struct bnx2x_vf_bar_info *bar_info) -{ - int n; - - bar_info->nr_bars = bp->vfdb->sriov.nres; - for (n = 0; n < bar_info->nr_bars; n++) - bar_info->bars[n] = vf->bars[n]; -} - void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, enum channel_tlvs tlv) { @@ -3405,13 +2555,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, ivi->spoofchk = 1; /*always enabled */ if (vf->state == VF_ENABLED) { /* mac and vlan are in vlan_mac objects */ - if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj))) + if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 0, ETH_ALEN); - if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj))) vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan, 0, VLAN_HLEN); + } } else { /* mac */ if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) @@ -3485,17 +2635,17 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { /* configure the mac in device on this vf's queue */ unsigned long ramrod_flags = 0; - struct bnx2x_vlan_mac_obj *mac_obj = - &bnx2x_leading_vfq(vf, mac_obj); + struct bnx2x_vlan_mac_obj *mac_obj; - rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); - if (rc) - return rc; + /* User should be able to see failure reason in system logs */ + if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) + return -EINVAL; /* must lock vfpf channel to protect against vf flows */ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); /* remove existing eth macs 
*/ + mac_obj = &bnx2x_leading_vfq(vf, mac_obj); rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); if (rc) { BNX2X_ERR("failed to delete eth macs\n"); @@ -3569,17 +2719,16 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) BNX2X_Q_LOGICAL_STATE_ACTIVE) return rc; - /* configure the vlan in device on this vf's queue */ - vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); - rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); - if (rc) - return rc; + /* User should be able to see error in system logs */ + if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) + return -EINVAL; /* must lock vfpf channel to protect against vf flows */ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); /* remove existing vlans */ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, &ramrod_flags); if (rc) { @@ -3736,13 +2885,9 @@ void bnx2x_timer_sriov(struct bnx2x *bp) bnx2x_sample_bulletin(bp); /* if channel is down we need to self destruct */ - if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, - &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); - } + if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, + BNX2X_MSG_IOV); } void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) @@ -3756,12 +2901,16 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp) mutex_init(&bp->vf2pf_mutex); /* allocate vf2pf mailbox for vf to pf channel */ - BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping, - sizeof(struct bnx2x_vf_mbx_msg)); + bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping, + sizeof(struct bnx2x_vf_mbx_msg)); + if (!bp->vf2pf_mbox) + goto alloc_mem_err; /* allocate pf 2 vf bulletin board */ - BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping, - sizeof(union pf_vf_bulletin)); + bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping, + sizeof(union pf_vf_bulletin)); + if (!bp->pf2vf_bulletin) + goto alloc_mem_err; return 0; @@ -3792,3 +2941,28 @@ void bnx2x_iov_channel_down(struct bnx2x *bp) bnx2x_post_vf_bulletin(bp, vf_idx); } } + +void bnx2x_iov_task(struct work_struct *work) +{ + struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work); + + if (!netif_running(bp->dev)) + return; + + if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR, + &bp->iov_task_state)) + bnx2x_vf_handle_flr_event(bp); + + if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG, + &bp->iov_task_state)) + bnx2x_vf_mbx(bp); +} + +void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) +{ + smp_mb__before_clear_bit(); + set_bit(flag, &bp->iov_task_state); + smp_mb__after_clear_bit(); + DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); + queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index d9fcca1b5a9d..8bf764570eef 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -30,6 +30,8 @@ enum sample_bulletin_result { #ifdef CONFIG_BNX2X_SRIOV +extern struct workqueue_struct *bnx2x_iov_wq; + /* The bnx2x device structure holds vfdb structure described below. * The VF array is indexed by the relative vfid. 
*/ @@ -83,108 +85,35 @@ struct bnx2x_vf_queue { u16 index; u16 sb_idx; bool is_leading; + bool sp_initialized; }; -/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters: - * q-init, q-setup and SB index +/* struct bnx2x_vf_queue_construct_params - prepare queue construction + * parameters: q-init, q-setup and SB index */ -struct bnx2x_vfop_qctor_params { +struct bnx2x_vf_queue_construct_params { struct bnx2x_queue_state_params qstate; struct bnx2x_queue_setup_params prep_qsetup; }; -/* VFOP parameters (one copy per VF) */ -union bnx2x_vfop_params { - struct bnx2x_vlan_mac_ramrod_params vlan_mac; - struct bnx2x_rx_mode_ramrod_params rx_mode; - struct bnx2x_mcast_ramrod_params mcast; - struct bnx2x_config_rss_params rss; - struct bnx2x_vfop_qctor_params qctor; -}; - /* forward */ struct bnx2x_virtf; /* VFOP definitions */ -typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf); - -struct bnx2x_vfop_cmd { - vfop_handler_t done; - bool block; -}; -/* VFOP queue filters command additional arguments */ -struct bnx2x_vfop_filter { - struct list_head link; +struct bnx2x_vf_mac_vlan_filter { int type; -#define BNX2X_VFOP_FILTER_MAC 1 -#define BNX2X_VFOP_FILTER_VLAN 2 +#define BNX2X_VF_FILTER_MAC 1 +#define BNX2X_VF_FILTER_VLAN 2 bool add; u8 *mac; u16 vid; }; -struct bnx2x_vfop_filters { - int add_cnt; - struct list_head head; - struct bnx2x_vfop_filter filters[]; -}; - -/* transient list allocated, built and saved until its - * passed to the SP-VERBs layer. - */ -struct bnx2x_vfop_args_mcast { - int mc_num; - struct bnx2x_mcast_list_elem *mc; -}; - -struct bnx2x_vfop_args_qctor { - int qid; - u16 sb_idx; -}; - -struct bnx2x_vfop_args_qdtor { - int qid; - struct eth_context *cxt; -}; - -struct bnx2x_vfop_args_defvlan { - int qid; - bool enable; - u16 vid; - u8 prio; -}; - -struct bnx2x_vfop_args_qx { - int qid; - bool en_add; -}; - -struct bnx2x_vfop_args_filters { - struct bnx2x_vfop_filters *multi_filter; - atomic_t *credit; /* non NULL means 'don't consume credit' */ -}; - -union bnx2x_vfop_args { - struct bnx2x_vfop_args_mcast mc_list; - struct bnx2x_vfop_args_qctor qctor; - struct bnx2x_vfop_args_qdtor qdtor; - struct bnx2x_vfop_args_defvlan defvlan; - struct bnx2x_vfop_args_qx qx; - struct bnx2x_vfop_args_filters filters; -}; - -struct bnx2x_vfop { - struct list_head link; - int rc; /* return code */ - int state; /* next state */ - union bnx2x_vfop_args args; /* extra arguments */ - union bnx2x_vfop_params *op_p; /* ramrod params */ - - /* state machine callbacks */ - vfop_handler_t transition; - vfop_handler_t done; +struct bnx2x_vf_mac_vlan_filters { + int count; + struct bnx2x_vf_mac_vlan_filter filters[]; }; /* vf context */ @@ -204,15 +133,7 @@ struct bnx2x_virtf { #define VF_ENABLED 2 /* VF Enabled */ #define VF_RESET 3 /* VF FLR'd, pending cleanup */ - /* non 0 during flr cleanup */ - u8 flr_clnup_stage; -#define VF_FLR_CLN 1 /* reclaim resources and do 'final cleanup' - * sans the end-wait - */ -#define VF_FLR_ACK 2 /* ACK flr notification */ -#define VF_FLR_EPILOG 3 /* wait for VF remnants to dissipate in the HW - * ~ final cleanup' end wait - */ + bool flr_clnup_stage; /* true during flr cleanup */ /* dma */ dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ @@ -276,11 +197,6 @@ struct bnx2x_virtf { struct bnx2x_rss_config_obj rss_conf_obj; /* slow-path operations */ - atomic_t op_in_progress; - int op_rc; - bool op_wait_blocking; - struct list_head op_list_head; - union bnx2x_vfop_params op_params; struct mutex op_mutex; /* one vfop at a time 
mutex */ enum channel_tlvs op_current; }; @@ -338,11 +254,6 @@ struct bnx2x_vf_mbx { u32 vf_addr_hi; struct vfpf_first_tlv first_tlv; /* saved VF request header */ - - u8 flags; -#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent - * more then one pending msg - */ }; struct bnx2x_vf_sp { @@ -419,6 +330,10 @@ struct bnx2x_vfdb { /* the number of msix vectors belonging to this PF designated for VFs */ u16 vf_sbs_pool; u16 first_vf_igu_entry; + + /* sp_rtnl synchronization */ + struct mutex event_mutex; + u64 event_occur; }; /* queue access */ @@ -468,13 +383,13 @@ void bnx2x_iov_init_dq(struct bnx2x *bp); void bnx2x_iov_init_dmae(struct bnx2x *bp); void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, struct bnx2x_queue_sp_obj **q_obj); -void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work); int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem); void bnx2x_iov_adjust_stats_req(struct bnx2x *bp); void bnx2x_iov_storm_stats_update(struct bnx2x *bp); -void bnx2x_iov_sp_task(struct bnx2x *bp); /* global vf mailbox routines */ -void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event); +void bnx2x_vf_mbx(struct bnx2x *bp); +void bnx2x_vf_mbx_schedule(struct bnx2x *bp, + struct vf_pf_event_data *vfpf_event); void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid); /* CORE VF API */ @@ -487,162 +402,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map); -/* VFOP generic helpers */ -#define bnx2x_vfop_default(state) do { \ - BNX2X_ERR("Bad state %d\n", (state)); \ - vfop->rc = -EINVAL; \ - goto op_err; \ - } while (0) - -enum { - VFOP_DONE, - VFOP_CONT, - VFOP_VERIFY_PEND, -}; - -#define bnx2x_vfop_finalize(vf, rc, next) do { \ - if ((rc) < 0) \ - goto op_err; \ - else if ((rc) > 0) \ - goto op_pending; \ - else if ((next) == VFOP_DONE) \ - goto op_done; \ - else if ((next) == VFOP_VERIFY_PEND) \ - BNX2X_ERR("expected pending\n"); \ - else { \ - DP(BNX2X_MSG_IOV, "no ramrod. 
Scheduling\n"); \ - atomic_set(&vf->op_in_progress, 1); \ - queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \ - return; \ - } \ - } while (0) - -#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \ - do { \ - vfop->state = first_state; \ - vfop->op_p = &vf->op_params; \ - vfop->transition = trans_hndlr; \ - vfop->done = done_hndlr; \ - } while (0) - -static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp, - struct bnx2x_virtf *vf) -{ - WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); - WARN_ON(list_empty(&vf->op_list_head)); - return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link); -} - -static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp, - struct bnx2x_virtf *vf) -{ - struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL); - - WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); - if (vfop) { - INIT_LIST_HEAD(&vfop->link); - list_add(&vfop->link, &vf->op_list_head); - } - return vfop; -} - -static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf, - struct bnx2x_vfop *vfop) -{ - /* rc < 0 - error, otherwise set to 0 */ - DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc); - if (vfop->rc >= 0) - vfop->rc = 0; - DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc); - - /* unlink the current op context and propagate error code - * must be done before invoking the 'done()' handler - */ - WARN(!mutex_is_locked(&vf->op_mutex), - "about to access vf op linked list but mutex was not locked!"); - list_del(&vfop->link); - - if (list_empty(&vf->op_list_head)) { - DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc); - vf->op_rc = vfop->rc; - DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n", - vf->op_rc, vfop->rc); - } else { - struct bnx2x_vfop *cur_vfop; - - DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc); - cur_vfop = bnx2x_vfop_cur(bp, vf); - cur_vfop->rc = vfop->rc; - DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n", - vf->op_rc, vfop->rc); - } - - /* invoke done handler */ - if (vfop->done) { - DP(BNX2X_MSG_IOV, "calling done handler\n"); - vfop->done(bp, vf); - } else { - /* there is no done handler for the operation to unlock - * the mutex. Must have gotten here from PF initiated VF RELEASE - */ - bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); - } - - DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n", - vf->op_rc, vfop->rc); - - /* if this is the last nested op reset the wait_blocking flag - * to release any blocking wrappers, only after 'done()' is invoked - */ - if (list_empty(&vf->op_list_head)) { - DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc); - vf->op_wait_blocking = false; - } - - kfree(vfop); -} - -static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp, - struct bnx2x_virtf *vf) -{ - /* can take a while if any port is running */ - int cnt = 5000; - - might_sleep(); - while (cnt--) { - if (vf->op_wait_blocking == false) { -#ifdef BNX2X_STOP_ON_ERROR - DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt); -#endif - return 0; - } - usleep_range(1000, 2000); - - if (bp->panic) - return -EIO; - } - - /* timeout! 
*/ -#ifdef BNX2X_STOP_ON_ERROR - bnx2x_panic(); -#endif - - return -EBUSY; -} - -static inline int bnx2x_vfop_transition(struct bnx2x *bp, - struct bnx2x_virtf *vf, - vfop_handler_t transition, - bool block) -{ - if (block) - vf->op_wait_blocking = true; - transition(bp, vf); - if (block) - return bnx2x_vfop_wait_blocking(bp, vf); - return 0; -} - /* VFOP queue construction helpers */ void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_queue_init_params *init_params, @@ -657,59 +416,41 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, void bnx2x_vfop_qctor_prep(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q, - struct bnx2x_vfop_qctor_params *p, + struct bnx2x_vf_queue_construct_params *p, unsigned long q_type); -int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - struct bnx2x_vfop_filters *macs, - int qid, bool drv_only); - -int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - struct bnx2x_vfop_filters *vlans, - int qid, bool drv_only); - -int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid); - -int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid); - -int bnx2x_vfop_mcast_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - bnx2x_mac_addr_t *mcasts, - int mcast_num, bool drv_only); - -int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd, - int qid, unsigned long accept_flags); - -int bnx2x_vfop_close_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd); - -int bnx2x_vfop_release_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd); -int bnx2x_vfop_rss_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd); +int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mac_vlan_filters *filters, + int qid, bool drv_only); + +int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_queue_construct_params *qctor); + +int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid); + +int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, + bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only); + +int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid, unsigned long accept_flags); + +int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf); + +int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf); + +int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_config_rss_params *rss); + +int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct vfpf_tpa_tlv *tlv, + struct bnx2x_queue_update_tpa_params *params); /* VF release ~ VF close + VF release-resources * * Release is the ultimate SW shutdown and is called whenever an * irrecoverable error is encountered. 
*/ -void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block); +int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf); int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf); @@ -772,18 +513,20 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); void bnx2x_iov_channel_down(struct bnx2x *bp); +void bnx2x_iov_task(struct work_struct *work); + +void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag); + #else /* CONFIG_BNX2X_SRIOV */ static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, struct bnx2x_queue_sp_obj **q_obj) {} -static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, - bool queue_work) {} static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) {return 1; } -static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {} -static inline void bnx2x_vf_mbx(struct bnx2x *bp, - struct vf_pf_event_data *vfpf_event) {} +static inline void bnx2x_vf_mbx(struct bnx2x *bp) {} +static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp, + struct vf_pf_event_data *vfpf_event) {} static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; } static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {} static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; } @@ -830,5 +573,8 @@ static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} +static inline void bnx2x_iov_task(struct work_struct *work) {} +static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {} + #endif /* CONFIG_BNX2X_SRIOV */ #endif /* bnx2x_sriov.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 3fa6c2a2a5a9..0622884596b2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -548,6 +548,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, vf->leading_rss = cl_id; q->is_leading = true; + q->sp_initialized = true; } /* ask the pf to open a queue for the vf */ @@ -672,6 +673,7 @@ static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) out: bnx2x_vfpf_finalize(bp, &req->first_tlv); + return rc; } @@ -894,29 +896,16 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode); - switch (mode) { - case BNX2X_RX_MODE_NONE: /* no Rx */ + /* Ignore everything except MODE_NONE */ + if (mode == BNX2X_RX_MODE_NONE) { req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE; - break; - case BNX2X_RX_MODE_NORMAL: + } else { + /* Current PF driver will not look at the specific flags, + * but they are required when working with older drivers on hv. 
+ */ req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST; req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST; req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; - break; - case BNX2X_RX_MODE_ALLMULTI: - req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST; - req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST; - req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; - break; - case BNX2X_RX_MODE_PROMISC: - req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST; - req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST; - req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; - break; - default: - BNX2X_ERR("BAD rx mode (%d)\n", mode); - rc = -EINVAL; - goto out; } req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED; @@ -937,7 +926,7 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status); rc = -EINVAL; } -out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); return rc; @@ -1047,7 +1036,8 @@ static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp, } static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, - struct bnx2x_virtf *vf) + struct bnx2x_virtf *vf, + int vf_rc) { struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp; @@ -1059,7 +1049,7 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n", mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); - resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc); + resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc); /* send response */ vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) + @@ -1088,9 +1078,6 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, storm_memset_vf_mbx_ack(bp, vf->abs_vfid); mmiowb(); - /* initiate dmae to send the response */ - mbx->flags &= ~VF_MSG_INPROCESS; - /* copy the response header including status-done field, * must be last dmae, must be after FW is acked */ @@ -1110,14 +1097,15 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, return; mbx_error: - bnx2x_vf_release(bp, vf, false); /* non blocking */ + bnx2x_vf_release(bp, vf); } static void bnx2x_vf_mbx_resp(struct bnx2x *bp, - struct bnx2x_virtf *vf) + struct bnx2x_virtf *vf, + int rc) { bnx2x_vf_mbx_resp_single_tlv(bp, vf); - bnx2x_vf_mbx_resp_send_msg(bp, vf); + bnx2x_vf_mbx_resp_send_msg(bp, vf, rc); } static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp, @@ -1159,7 +1147,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, resp->pfdev_info.db_size = bp->db_size; resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2; resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | - /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA); + PFVF_CAP_TPA | + PFVF_CAP_TPA_UPDATE); bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver, sizeof(resp->pfdev_info.fw_ver)); @@ -1240,8 +1229,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, sizeof(struct channel_list_end_tlv)); /* send the response */ - vf->op_rc = vfop_status; - bnx2x_vf_mbx_resp_send_msg(bp, vf); + bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status); } static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, @@ -1273,19 +1261,20 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) { struct vfpf_init_tlv *init = &mbx->msg->req.init; + int rc; /* record ghost addresses from vf message */ vf->spq_map = init->spq_addr; vf->fw_stat_map = init->stats_addr; vf->stats_stride = init->stats_stride; - vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t 
*)init->sb_addr); + rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); /* set VF multiqueue statistics collection mode */ if (init->flags & VFPF_INIT_FLG_STATS_COALESCE) vf->cfg_flags |= VF_CFG_STATS_COALESCE; /* response */ - bnx2x_vf_mbx_resp(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); } /* convert MBX queue-flags to standard SP queue-flags */ @@ -1320,16 +1309,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) { struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q; - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vf_mbx_resp, - .block = false, - }; + struct bnx2x_vf_queue_construct_params qctor; + int rc = 0; /* verify vf_qid */ if (setup_q->vf_qid >= vf_rxq_count(vf)) { BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n", setup_q->vf_qid, vf_rxq_count(vf)); - vf->op_rc = -EINVAL; + rc = -EINVAL; goto response; } @@ -1347,9 +1334,10 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_leading_vfq_init(bp, vf, q); /* re-init the VF operation context */ - memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); - setup_p = &vf->op_params.qctor.prep_qsetup; - init_p = &vf->op_params.qctor.qstate.params.init; + memset(&qctor, 0 , + sizeof(struct bnx2x_vf_queue_construct_params)); + setup_p = &qctor.prep_qsetup; + init_p = &qctor.qstate.params.init; /* activate immediately */ __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags); @@ -1435,44 +1423,34 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, q->index, q->sb_idx); } /* complete the preparations */ - bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type); + bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type); - vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index); - if (vf->op_rc) + rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor); + if (rc) goto response; - return; } response: - bnx2x_vf_mbx_resp(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); } -enum bnx2x_vfop_filters_state { - BNX2X_VFOP_MBX_Q_FILTERS_MACS, - BNX2X_VFOP_MBX_Q_FILTERS_VLANS, - BNX2X_VFOP_MBX_Q_FILTERS_RXMODE, - BNX2X_VFOP_MBX_Q_FILTERS_MCAST, - BNX2X_VFOP_MBX_Q_FILTERS_DONE -}; - static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, struct bnx2x_virtf *vf, struct vfpf_set_q_filters_tlv *tlv, - struct bnx2x_vfop_filters **pfl, + struct bnx2x_vf_mac_vlan_filters **pfl, u32 type_flag) { int i, j; - struct bnx2x_vfop_filters *fl = NULL; + struct bnx2x_vf_mac_vlan_filters *fl = NULL; size_t fsz; - fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) + - sizeof(struct bnx2x_vfop_filters); + fsz = tlv->n_mac_vlan_filters * + sizeof(struct bnx2x_vf_mac_vlan_filter) + + sizeof(struct bnx2x_vf_mac_vlan_filters); fl = kzalloc(fsz, GFP_KERNEL); if (!fl) return -ENOMEM; - INIT_LIST_HEAD(&fl->head); - for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) { struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i]; @@ -1480,17 +1458,17 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, continue; if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) { fl->filters[j].mac = msg_filter->mac; - fl->filters[j].type = BNX2X_VFOP_FILTER_MAC; + fl->filters[j].type = BNX2X_VF_FILTER_MAC; } else { fl->filters[j].vid = msg_filter->vlan_tag; - fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN; + fl->filters[j].type = BNX2X_VF_FILTER_VLAN; } fl->filters[j].add = (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ? 
true : false; - list_add_tail(&fl->filters[j++].link, &fl->head); + fl->count++; } - if (list_empty(&fl->head)) + if (!fl->count) kfree(fl); else *pfl = fl; @@ -1530,180 +1508,96 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl, #define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID #define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID -static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) +static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) { - int rc; + int rc = 0; struct vfpf_set_q_filters_tlv *msg = &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters; - struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); - enum bnx2x_vfop_filters_state state = vfop->state; - - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vfop_mbx_qfilters, - .block = false, - }; - - DP(BNX2X_MSG_IOV, "STATE: %d\n", state); - - if (vfop->rc < 0) - goto op_err; + /* check for any mac/vlan changes */ + if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { + /* build mac list */ + struct bnx2x_vf_mac_vlan_filters *fl = NULL; - switch (state) { - case BNX2X_VFOP_MBX_Q_FILTERS_MACS: - /* next state */ - vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS; + rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, + VFPF_MAC_FILTER); + if (rc) + goto op_err; - /* check for any vlan/mac changes */ - if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { - /* build mac list */ - struct bnx2x_vfop_filters *fl = NULL; + if (fl) { - vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, - VFPF_MAC_FILTER); - if (vfop->rc) + /* set mac list */ + rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, + msg->vf_qid, + false); + if (rc) goto op_err; - - if (fl) { - /* set mac list */ - rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl, - msg->vf_qid, - false); - if (rc) { - vfop->rc = rc; - goto op_err; - } - return; - } } - /* fall through */ - case BNX2X_VFOP_MBX_Q_FILTERS_VLANS: - /* next state */ - vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE; + /* build vlan list */ + fl = NULL; - /* check for any vlan/mac changes */ - if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { - /* build vlan list */ - struct bnx2x_vfop_filters *fl = NULL; - - vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, - VFPF_VLAN_FILTER); - if (vfop->rc) + rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, + VFPF_VLAN_FILTER); + if (rc) + goto op_err; + + if (fl) { + /* set vlan list */ + rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, + msg->vf_qid, + false); + if (rc) goto op_err; - - if (fl) { - /* set vlan list */ - rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl, - msg->vf_qid, - false); - if (rc) { - vfop->rc = rc; - goto op_err; - } - return; - } } - /* fall through */ - - case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE: - /* next state */ - vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST; - - if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { - unsigned long accept = 0; - struct pf_vf_bulletin_content *bulletin = - BP_VF_BULLETIN(bp, vf->index); - - /* covert VF-PF if mask to bnx2x accept flags */ - if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) - __set_bit(BNX2X_ACCEPT_UNICAST, &accept); - - if (msg->rx_mask & - VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST) - __set_bit(BNX2X_ACCEPT_MULTICAST, &accept); - - if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST) - __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept); - - if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST) - __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept); + } - if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST) - __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); + if 
(msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { + unsigned long accept = 0; + struct pf_vf_bulletin_content *bulletin = + BP_VF_BULLETIN(bp, vf->index); - /* A packet arriving the vf's mac should be accepted - * with any vlan, unless a vlan has already been - * configured. - */ - if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) - __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); - - /* set rx-mode */ - rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, - msg->vf_qid, accept); - if (rc) { - vfop->rc = rc; - goto op_err; - } - return; + /* Ignore VF requested mode; instead set a regular mode */ + if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) { + __set_bit(BNX2X_ACCEPT_UNICAST, &accept); + __set_bit(BNX2X_ACCEPT_MULTICAST, &accept); + __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); } - /* fall through */ - - case BNX2X_VFOP_MBX_Q_FILTERS_MCAST: - /* next state */ - vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE; - - if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) { - /* set mcasts */ - rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast, - msg->n_multicast, false); - if (rc) { - vfop->rc = rc; - goto op_err; - } - return; - } - /* fall through */ -op_done: - case BNX2X_VFOP_MBX_Q_FILTERS_DONE: - bnx2x_vfop_end(bp, vf, vfop); - return; -op_err: - BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n", - vf->abs_vfid, msg->vf_qid, vfop->rc); - goto op_done; - default: - bnx2x_vfop_default(state); + /* A packet arriving the vf's mac should be accepted + * with any vlan, unless a vlan has already been + * configured. + */ + if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); + + /* set rx-mode */ + rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept); + if (rc) + goto op_err; } -} -static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vfop_cmd *cmd) -{ - struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - if (vfop) { - bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS, - bnx2x_vfop_mbx_qfilters, cmd->done); - return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters, - cmd->block); + if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) { + /* set mcasts */ + rc = bnx2x_vf_mcast(bp, vf, msg->multicast, + msg->n_multicast, false); + if (rc) + goto op_err; } - return -ENOMEM; +op_err: + if (rc) + BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n", + vf->abs_vfid, msg->vf_qid, rc); + return rc; } -static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, - struct bnx2x_virtf *vf, - struct bnx2x_vf_mbx *mbx) +static int bnx2x_filters_validate_mac(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct vfpf_set_q_filters_tlv *filters) { - struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters; struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vf_mbx_resp, - .block = false, - }; + int rc = 0; /* if a mac was already set for this VF via the set vf mac ndo, we only * accept mac configurations of that mac. Why accept them at all? 
@@ -1715,7 +1609,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, if (filters->n_mac_vlan_filters > 1) { BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n", vf->abs_vfid); - vf->op_rc = -EPERM; + rc = -EPERM; goto response; } @@ -1725,10 +1619,22 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n", vf->abs_vfid); - vf->op_rc = -EPERM; + rc = -EPERM; goto response; } } + +response: + return rc; +} + +static int bnx2x_filters_validate_vlan(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct vfpf_set_q_filters_tlv *filters) +{ + struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); + int rc = 0; + /* if vlan was set by hypervisor we don't allow guest to config vlan */ if (bulletin->valid_bitmap & 1 << VLAN_VALID) { int i; @@ -1739,14 +1645,35 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, VFPF_Q_FILTER_VLAN_TAG_VALID) { BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n", vf->abs_vfid); - vf->op_rc = -EPERM; + rc = -EPERM; goto response; } } } /* verify vf_qid */ - if (filters->vf_qid > vf_rxq_count(vf)) + if (filters->vf_qid > vf_rxq_count(vf)) { + rc = -EPERM; + goto response; + } + +response: + return rc; +} + +static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters; + int rc; + + rc = bnx2x_filters_validate_mac(bp, vf, filters); + if (rc) + goto response; + + rc = bnx2x_filters_validate_vlan(bp, vf, filters); + if (rc) goto response; DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n", @@ -1756,125 +1683,169 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, /* print q_filter message */ bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters); - vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd); - if (vf->op_rc) - goto response; - return; - + rc = bnx2x_vf_mbx_qfilters(bp, vf); response: - bnx2x_vf_mbx_resp(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); } static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) { int qid = mbx->msg->req.q_op.vf_qid; - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vf_mbx_resp, - .block = false, - }; + int rc; DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n", vf->abs_vfid, qid); - vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid); - if (vf->op_rc) - bnx2x_vf_mbx_resp(bp, vf); + rc = bnx2x_vf_queue_teardown(bp, vf, qid); + bnx2x_vf_mbx_resp(bp, vf, rc); } static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) { - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vf_mbx_resp, - .block = false, - }; + int rc; DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid); - vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); - if (vf->op_rc) - bnx2x_vf_mbx_resp(bp, vf); + rc = bnx2x_vf_close(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); } static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) { - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vf_mbx_resp, - .block = false, - }; + int rc; DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid); - vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); - if (vf->op_rc) - bnx2x_vf_mbx_resp(bp, vf); + rc = bnx2x_vf_free(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); } static void bnx2x_vf_mbx_update_rss(struct bnx2x 
*bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) { - struct bnx2x_vfop_cmd cmd = { - .done = bnx2x_vf_mbx_resp, - .block = false, - }; - struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss; + struct bnx2x_config_rss_params rss; struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss; + int rc = 0; if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE || rss_tlv->rss_key_size != T_ETH_RSS_KEY) { BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n", vf->index); - vf->op_rc = -EINVAL; + rc = -EINVAL; goto mbx_resp; } + memset(&rss, 0, sizeof(struct bnx2x_config_rss_params)); + /* set vfop params according to rss tlv */ - memcpy(vf_op_params->ind_table, rss_tlv->ind_table, + memcpy(rss.ind_table, rss_tlv->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); - memcpy(vf_op_params->rss_key, rss_tlv->rss_key, - sizeof(rss_tlv->rss_key)); - vf_op_params->rss_obj = &vf->rss_conf_obj; - vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; + memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key)); + rss.rss_obj = &vf->rss_conf_obj; + rss.rss_result_mask = rss_tlv->rss_result_mask; /* flags handled individually for backward/forward compatability */ - vf_op_params->rss_flags = 0; - vf_op_params->ramrod_flags = 0; + rss.rss_flags = 0; + rss.ramrod_flags = 0; if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) - __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) - __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH) - __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_IPV4) - __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_IPV4, &rss.rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) - __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) - __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_IPV6) - __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_IPV6, &rss.rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) - __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP) - __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags); + __set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags); if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) && rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) || (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) && rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) { BNX2X_ERR("about to hit a FW assert. 
aborting...\n"); - vf->op_rc = -EINVAL; + rc = -EINVAL; goto mbx_resp; } - vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd); + rc = bnx2x_vf_rss_update(bp, vf, &rss); +mbx_resp: + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +static int bnx2x_validate_tpa_params(struct bnx2x *bp, + struct vfpf_tpa_tlv *tpa_tlv) +{ + int rc = 0; + + if (tpa_tlv->tpa_client_info.max_sges_for_packet > + U_ETH_MAX_SGES_FOR_PACKET) { + rc = -EINVAL; + BNX2X_ERR("TPA update: max_sges received %d, max is %d\n", + tpa_tlv->tpa_client_info.max_sges_for_packet, + U_ETH_MAX_SGES_FOR_PACKET); + } + + if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) { + rc = -EINVAL; + BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n", + tpa_tlv->tpa_client_info.max_tpa_queues, + MAX_AGG_QS(bp)); + } + + return rc; +} + +static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct bnx2x_queue_update_tpa_params vf_op_params; + struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa; + int rc = 0; + + memset(&vf_op_params, 0, sizeof(vf_op_params)); + + if (bnx2x_validate_tpa_params(bp, tpa_tlv)) + goto mbx_resp; + + vf_op_params.complete_on_both_clients = + tpa_tlv->tpa_client_info.complete_on_both_clients; + vf_op_params.dont_verify_thr = + tpa_tlv->tpa_client_info.dont_verify_thr; + vf_op_params.max_agg_sz = + tpa_tlv->tpa_client_info.max_agg_size; + vf_op_params.max_sges_pkt = + tpa_tlv->tpa_client_info.max_sges_for_packet; + vf_op_params.max_tpa_queues = + tpa_tlv->tpa_client_info.max_tpa_queues; + vf_op_params.sge_buff_sz = + tpa_tlv->tpa_client_info.sge_buff_size; + vf_op_params.sge_pause_thr_high = + tpa_tlv->tpa_client_info.sge_pause_thr_high; + vf_op_params.sge_pause_thr_low = + tpa_tlv->tpa_client_info.sge_pause_thr_low; + vf_op_params.tpa_mode = + tpa_tlv->tpa_client_info.tpa_mode; + vf_op_params.update_ipv4 = + tpa_tlv->tpa_client_info.update_ipv4; + vf_op_params.update_ipv6 = + tpa_tlv->tpa_client_info.update_ipv6; + + rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params); mbx_resp: - if (vf->op_rc) - bnx2x_vf_mbx_resp(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); } /* dispatch request */ @@ -1916,6 +1887,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, case CHANNEL_TLV_UPDATE_RSS: bnx2x_vf_mbx_update_rss(bp, vf, mbx); return; + case CHANNEL_TLV_UPDATE_TPA: + bnx2x_vf_mbx_update_tpa(bp, vf, mbx); + return; } } else { @@ -1935,11 +1909,8 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, /* can we respond to VF (do we have an address for it?) 
*/ if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { - /* mbx_resp uses the op_rc of the VF */ - vf->op_rc = PFVF_STATUS_NOT_SUPPORTED; - /* notify the VF that we do not support this request */ - bnx2x_vf_mbx_resp(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED); } else { /* can't send a response since this VF is unknown to us * just ack the FW to release the mailbox and unlock @@ -1952,13 +1923,10 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, } } -/* handle new vf-pf message */ -void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event) +void bnx2x_vf_mbx_schedule(struct bnx2x *bp, + struct vf_pf_event_data *vfpf_event) { - struct bnx2x_virtf *vf; - struct bnx2x_vf_mbx *mbx; u8 vf_idx; - int rc; DP(BNX2X_MSG_IOV, "vf pf event received: vfid %d, address_hi %x, address lo %x", @@ -1970,50 +1938,73 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event) BNX2X_NR_VIRTFN(bp)) { BNX2X_ERR("Illegal vf_id %d max allowed: %d\n", vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp)); - goto mbx_done; + return; } + vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id); - mbx = BP_VF_MBX(bp, vf_idx); - /* verify an event is not currently being processed - - * debug failsafe only - */ - if (mbx->flags & VF_MSG_INPROCESS) { - BNX2X_ERR("Previous message is still being processed, vf_id %d\n", - vfpf_event->vf_id); - goto mbx_done; - } - vf = BP_VF(bp, vf_idx); + /* Update VFDB with current message and schedule its handling */ + mutex_lock(&BP_VFDB(bp)->event_mutex); + BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi; + BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo; + BP_VFDB(bp)->event_occur |= (1ULL << vf_idx); + mutex_unlock(&BP_VFDB(bp)->event_mutex); - /* save the VF message address */ - mbx->vf_addr_hi = vfpf_event->msg_addr_hi; - mbx->vf_addr_lo = vfpf_event->msg_addr_lo; - DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n", - mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); + bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG); +} - /* dmae to get the VF request */ - rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid, - mbx->vf_addr_hi, mbx->vf_addr_lo, - sizeof(union vfpf_tlvs)/4); - if (rc) { - BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid); - goto mbx_error; - } +/* handle new vf-pf messages */ +void bnx2x_vf_mbx(struct bnx2x *bp) +{ + struct bnx2x_vfdb *vfdb = BP_VFDB(bp); + u64 events; + u8 vf_idx; + int rc; - /* process the VF message header */ - mbx->first_tlv = mbx->msg->req.first_tlv; + if (!vfdb) + return; - /* Clean response buffer to refrain from falsely seeing chains */ - memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs)); + mutex_lock(&vfdb->event_mutex); + events = vfdb->event_occur; + vfdb->event_occur = 0; + mutex_unlock(&vfdb->event_mutex); - /* dispatch the request (will prepare the response) */ - bnx2x_vf_mbx_request(bp, vf, mbx); - goto mbx_done; + for_each_vf(bp, vf_idx) { + struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx); + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); -mbx_error: - bnx2x_vf_release(bp, vf, false); /* non blocking */ -mbx_done: - return; + /* Handle VFs which have pending events */ + if (!(events & (1ULL << vf_idx))) + continue; + + DP(BNX2X_MSG_IOV, + "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n", + vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo, + mbx->first_tlv.resp_msg_offset); + + /* dmae to get the VF request */ + rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, + 
vf->abs_vfid, mbx->vf_addr_hi, + mbx->vf_addr_lo, + sizeof(union vfpf_tlvs)/4); + if (rc) { + BNX2X_ERR("Failed to copy request VF %d\n", + vf->abs_vfid); + bnx2x_vf_release(bp, vf); + return; + } + + /* process the VF message header */ + mbx->first_tlv = mbx->msg->req.first_tlv; + + /* Clean response buffer to refrain from falsely + * seeing chains. + */ + memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs)); + + /* dispatch the request (will prepare the response) */ + bnx2x_vf_mbx_request(bp, vf, mbx); + } } /* propagate local bulletin board to vf */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index 208568bc7a71..c922b81170e5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv { #define PFVF_CAP_RSS 0x00000001 #define PFVF_CAP_DHC 0x00000002 #define PFVF_CAP_TPA 0x00000004 +#define PFVF_CAP_TPA_UPDATE 0x00000008 char fw_ver[32]; u16 db_size; u8 indices_per_sb; @@ -303,6 +304,25 @@ struct vfpf_set_q_filters_tlv { u32 rx_mask; /* see mask constants at the top of the file */ }; +struct vfpf_tpa_tlv { + struct vfpf_first_tlv first_tlv; + + struct vf_pf_tpa_client_info { + aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF]; + u8 update_ipv4; + u8 update_ipv6; + u8 max_tpa_queues; + u8 max_sges_for_packet; + u8 complete_on_both_clients; + u8 dont_verify_thr; + u8 tpa_mode; + u16 sge_buff_size; + u16 max_agg_size; + u16 sge_pause_thr_low; + u16 sge_pause_thr_high; + } tpa_client_info; +}; + /* close VF (disable VF) */ struct vfpf_close_tlv { struct vfpf_first_tlv first_tlv; @@ -331,6 +351,7 @@ union vfpf_tlvs { struct vfpf_set_q_filters_tlv set_q_filters; struct vfpf_release_tlv release; struct vfpf_rss_tlv update_rss; + struct vfpf_tpa_tlv update_tpa; struct channel_list_end_tlv list_end; struct tlv_buffer_size tlv_buf_size; }; @@ -405,6 +426,7 @@ enum channel_tlvs { CHANNEL_TLV_PF_SET_VLAN, CHANNEL_TLV_UPDATE_RSS, CHANNEL_TLV_PHYS_PORT_ID, + CHANNEL_TLV_UPDATE_TPA, CHANNEL_TLV_MAX };
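A few of the mechanisms this patch introduces are worth unpacking. The central one is that VF-PF mailbox requests and FLR notifications are no longer handled from the slow-path interrupt flow; they are recorded as flag bits and deferred to the new iov_task running on the dedicated bnx2x_iov_wq workqueue, where the handlers are free to sleep. Below is a minimal sketch of the same schedule/consume pattern in isolation; the demo_* names are illustrative and not part of the driver, and the memory-barrier pairing mirrors bnx2x_schedule_iov_task() as written in the patch.

#include <linux/bitops.h>
#include <linux/workqueue.h>

enum demo_iov_flag {
        DEMO_HANDLE_VF_MSG,
        DEMO_HANDLE_FLR,
};

static struct workqueue_struct *demo_iov_wq;
static struct delayed_work demo_iov_task;
static unsigned long demo_iov_state;

/* producer side: may run in a context that must not sleep */
static void demo_schedule_iov(enum demo_iov_flag flag)
{
        smp_mb__before_clear_bit();
        set_bit(flag, &demo_iov_state);
        smp_mb__after_clear_bit();
        queue_delayed_work(demo_iov_wq, &demo_iov_task, 0);
}

/* consumer side: runs on the workqueue, so it may sleep */
static void demo_iov_task_fn(struct work_struct *work)
{
        if (test_and_clear_bit(DEMO_HANDLE_FLR, &demo_iov_state))
                ; /* run the sleepable FLR cleanup here */
        if (test_and_clear_bit(DEMO_HANDLE_VF_MSG, &demo_iov_state))
                ; /* drain the pending mailbox requests here */
}

static int demo_iov_setup(void)
{
        demo_iov_wq = create_singlethread_workqueue("demo_iov");
        if (!demo_iov_wq)
                return -ENOMEM;
        INIT_DELAYED_WORK(&demo_iov_task, demo_iov_task_fn);
        return 0;
}

Because test_and_clear_bit() consumes each flag exactly once, a flag set again while the task is running simply causes another pass, which is why the producer can queue with zero delay unconditionally.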
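With the handling deferred, bnx2x_vf_mbx_schedule() only records the request: under event_mutex it stores the VF's mailbox address and sets the VF's bit in the 64-bit event_occur word (one bit per VF; BNX2X_MAX_NUM_OF_VFS is 64, so a u64 suffices). bnx2x_vf_mbx() later snapshots and clears the word under the same mutex, so new events can keep arriving while earlier ones are processed. A minimal sketch of that handoff, again with illustrative demo_* names:

#include <linux/mutex.h>
#include <linux/types.h>

#define DEMO_MAX_VFS    64      /* matches BNX2X_MAX_NUM_OF_VFS */

static DEFINE_MUTEX(demo_event_mutex);
static u64 demo_event_occur;

/* called once per event-queue element */
static void demo_post_event(u8 vf_idx)
{
        mutex_lock(&demo_event_mutex);
        /* record the per-VF request details here, then mark it pending */
        demo_event_occur |= 1ULL << vf_idx;
        mutex_unlock(&demo_event_mutex);
}

/* called from the deferred task */
static void demo_handle_events(void)
{
        u64 events;
        u8 vf_idx;

        /* snapshot and clear, so producers never wait on the handlers */
        mutex_lock(&demo_event_mutex);
        events = demo_event_occur;
        demo_event_occur = 0;
        mutex_unlock(&demo_event_mutex);

        for (vf_idx = 0; vf_idx < DEMO_MAX_VFS; vf_idx++) {
                if (!(events & (1ULL << vf_idx)))
                        continue;
                /* copy the VF's request via DMAE and dispatch it */
        }
}

This also removes the need for the old VF_MSG_INPROCESS failsafe flag: at most one bit per VF can be pending, and the snapshot makes each bit observed exactly once.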
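Every handler in the rewritten mailbox now has the same synchronous shape: perform the operation (which may sleep, since we are on a workqueue), collect a local rc, and fall into a single response call that folds rc into a PFVF status code and DMAEs it back to the VF. The per-VF vf->op_rc field and the done/block vfop callbacks disappear entirely. Schematically, with illustrative demo_* names and stand-in bodies:

#include <linux/types.h>

struct demo_vf {
        u8 abs_vfid;
};

/* stand-in for the real, sleepable teardown; returns 0 or -errno */
static int demo_queue_teardown(struct demo_vf *vf, int qid)
{
        return 0;
}

/* stand-in for bnx2x_vf_mbx_resp(): rc becomes a PFVF status on the wire */
static void demo_mbx_resp(struct demo_vf *vf, int rc)
{
}

static void demo_mbx_teardown_q(struct demo_vf *vf, int qid)
{
        int rc = demo_queue_teardown(vf, qid);

        /* one exit path, taken on success and failure alike */
        demo_mbx_resp(vf, rc);
}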
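The per-operation linked list of struct bnx2x_vfop_filter is likewise gone; bnx2x_vf_mbx_macvlan_list() now flattens the TLV into a counted array built on a C99 flexible array member, sized exactly as fsz is computed in the hunk above. A sketch of that allocation idiom (demo_* names are illustrative):

#include <linux/slab.h>
#include <linux/types.h>

struct demo_filter {
        int type;               /* BNX2X_VF_FILTER_MAC or _VLAN in the driver */
        bool add;
        u8 *mac;
        u16 vid;
};

struct demo_filters {
        int count;
        struct demo_filter filters[];   /* flexible array member */
};

static struct demo_filters *demo_alloc_filters(int n)
{
        /* header plus n elements, in a single allocation */
        size_t fsz = n * sizeof(struct demo_filter) +
                     sizeof(struct demo_filters);

        return kzalloc(fsz, GFP_KERNEL); /* caller kfree()s, even if count stays 0 */
}

Compared with the list_head version, the flat array keeps the consumer loop a plain "for (i = 0; i < fl->count; i++)" and drops the per-element link pointers.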
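Finally, PFVF_CAP_TPA_UPDATE is advertised in the acquire response precisely so a VF can tell whether its PF understands CHANNEL_TLV_UPDATE_TPA before sending one; an older PF would answer such a request with PFVF_STATUS_NOT_SUPPORTED. A hypothetical VF-side guard (the helper name is illustrative; the pfdev_info.pf_cap field and the PFVF_CAP_* flags are the real ones from this header):

#include "bnx2x_vfpf.h"  /* struct pfvf_acquire_resp_tlv, PFVF_CAP_* */

/* hypothetical helper: check the PF's capabilities after acquire */
static bool demo_pf_supports_tpa_update(struct pfvf_acquire_resp_tlv *resp)
{
        return !!(resp->pfdev_info.pf_cap & PFVF_CAP_TPA_UPDATE);
}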