Diffstat (limited to 'drivers/net/ethernet/intel')
96 files changed, 7218 insertions, 6187 deletions
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 3facb55b7161..c18c3b373846 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -296,6 +296,7 @@ config ICE default n depends on PCI_MSI depends on PTP_1588_CLOCK_OPTIONAL + depends on GNSS || GNSS = n select AUXILIARY_BUS select DIMLIB select NET_DEVLINK diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 59e82d131d88..721f86fd5802 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -110,9 +110,9 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { static int e1000_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { + u32 speed, supported, advertising, lp_advertising, lpa_t; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 speed, supported, advertising; if (hw->phy.media_type == e1000_media_type_copper) { supported = (SUPPORTED_10baseT_Half | @@ -120,7 +120,9 @@ static int e1000_get_link_ksettings(struct net_device *netdev, SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | + SUPPORTED_Asym_Pause | SUPPORTED_Autoneg | + SUPPORTED_Pause | SUPPORTED_TP); if (hw->phy.type == e1000_phy_ife) supported &= ~SUPPORTED_1000baseT_Full; @@ -192,10 +194,16 @@ static int e1000_get_link_ksettings(struct net_device *netdev, if (hw->phy.media_type != e1000_media_type_copper) cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; + lpa_t = mii_stat1000_to_ethtool_lpa_t(adapter->phy_regs.stat1000); + lp_advertising = lpa_t | + mii_lpa_to_ethtool_lpa_t(adapter->phy_regs.lpa); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); return 0; } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 04acd1a992fa..e1eb1de88bf9 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7418,9 +7418,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_pci_reg; - /* AER (Advanced Error Reporting) hooks */ - pci_enable_pcie_error_reporting(pdev); - pci_set_master(pdev); /* PCI config space info */ err = pci_save_state(pdev); @@ -7708,7 +7705,6 @@ err_flashmap: err_ioremap: free_netdev(netdev); err_alloc_etherdev: - pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: @@ -7775,9 +7771,6 @@ static void e1000_remove(struct pci_dev *pdev) free_netdev(netdev); - /* AER disable */ - pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); } diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 060b263348ce..08c3d477dd6f 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -2,6 +2,7 @@ /* Copyright(c) 1999 - 2018 Intel Corporation. 
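Two notes on the hunks above. The `depends on GNSS || GNSS = n` line added to the ICE entry is the usual Kconfig idiom for an optional dependency: it forbids building ice into the kernel (y) while the GNSS core is a module (m), while still allowing configurations with GNSS absent. Separately, the e1000e ethtool hunk starts reporting link-partner abilities by translating the cached MII registers (adapter->phy_regs.lpa and adapter->phy_regs.stat1000) with mii_lpa_to_ethtool_lpa_t() and mii_stat1000_to_ethtool_lpa_t(). Below is a minimal standalone sketch of that bit translation, with the relevant constants inlined from linux/mii.h and linux/ethtool.h; the register snapshots in main() are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* MII link-partner ability bits (MII_LPA register) */
#define LPA_LPACK        0x4000  /* partner acknowledged autoneg */
#define LPA_10HALF       0x0020
#define LPA_10FULL       0x0040
#define LPA_100HALF      0x0080
#define LPA_100FULL      0x0100
#define LPA_PAUSE_CAP    0x0400
#define LPA_PAUSE_ASYM   0x0800
/* 1000BASE-T partner bits (MII_STAT1000 register) */
#define LPA_1000HALF     0x0400
#define LPA_1000FULL     0x0800

/* ethtool legacy u32 link-mode bits */
#define ADVERTISED_10baseT_Half    (1u << 0)
#define ADVERTISED_10baseT_Full    (1u << 1)
#define ADVERTISED_100baseT_Half   (1u << 2)
#define ADVERTISED_100baseT_Full   (1u << 3)
#define ADVERTISED_1000baseT_Half  (1u << 4)
#define ADVERTISED_1000baseT_Full  (1u << 5)
#define ADVERTISED_Autoneg         (1u << 6)
#define ADVERTISED_Pause           (1u << 13)
#define ADVERTISED_Asym_Pause      (1u << 14)

/* roughly mirrors mii_lpa_to_ethtool_lpa_t() */
static uint32_t lpa_to_ethtool(uint16_t lpa)
{
    uint32_t adv = 0;

    if (lpa & LPA_LPACK)
        adv |= ADVERTISED_Autoneg;
    if (lpa & LPA_10HALF)
        adv |= ADVERTISED_10baseT_Half;
    if (lpa & LPA_10FULL)
        adv |= ADVERTISED_10baseT_Full;
    if (lpa & LPA_100HALF)
        adv |= ADVERTISED_100baseT_Half;
    if (lpa & LPA_100FULL)
        adv |= ADVERTISED_100baseT_Full;
    if (lpa & LPA_PAUSE_CAP)
        adv |= ADVERTISED_Pause;
    if (lpa & LPA_PAUSE_ASYM)
        adv |= ADVERTISED_Asym_Pause;
    return adv;
}

/* roughly mirrors mii_stat1000_to_ethtool_lpa_t() */
static uint32_t stat1000_to_ethtool(uint16_t stat1000)
{
    uint32_t adv = 0;

    if (stat1000 & LPA_1000HALF)
        adv |= ADVERTISED_1000baseT_Half;
    if (stat1000 & LPA_1000FULL)
        adv |= ADVERTISED_1000baseT_Full;
    return adv;
}

int main(void)
{
    /* made-up register snapshots standing in for adapter->phy_regs */
    uint16_t lpa = LPA_LPACK | LPA_100FULL | LPA_PAUSE_CAP;
    uint16_t stat1000 = LPA_1000FULL;

    printf("lp_advertising = 0x%08x\n",
           stat1000_to_ethtool(stat1000) | lpa_to_ethtool(lpa));
    return 0;
}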
*/ #include "e1000.h" +#include <linux/ethtool.h> static s32 e1000_wait_autoneg(struct e1000_hw *hw); static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, @@ -1011,6 +1012,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) */ mii_autoneg_adv_reg &= ~(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + phy->autoneg_advertised &= + ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); break; case e1000_fc_rx_pause: /* Rx Flow control is enabled, and Tx Flow control is @@ -1024,6 +1027,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) */ mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + phy->autoneg_advertised |= + (ADVERTISED_Pause | ADVERTISED_Asym_Pause); break; case e1000_fc_tx_pause: /* Tx Flow control is enabled, and Rx Flow control is @@ -1031,6 +1036,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) */ mii_autoneg_adv_reg |= ADVERTISE_PAUSE_ASYM; mii_autoneg_adv_reg &= ~ADVERTISE_PAUSE_CAP; + phy->autoneg_advertised |= ADVERTISED_Asym_Pause; + phy->autoneg_advertised &= ~ADVERTISED_Pause; break; case e1000_fc_full: /* Flow control (both Rx and Tx) is enabled by a software @@ -1038,6 +1045,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) */ mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + phy->autoneg_advertised |= + (ADVERTISED_Pause | ADVERTISED_Asym_Pause); break; default: e_dbg("Flow control param set incorrectly\n"); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index b473cb7d7c57..027d721feb18 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2127,8 +2127,6 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_pci_reg; } - pci_enable_pcie_error_reporting(pdev); - pci_set_master(pdev); pci_save_state(pdev); @@ -2227,7 +2225,6 @@ err_sw_init: err_ioremap: free_netdev(netdev); err_alloc_netdev: - pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: @@ -2281,8 +2278,6 @@ static void fm10k_remove(struct pci_dev *pdev) pci_release_mem_regions(pdev); - pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); } diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 60e351665c70..60ce4d15d82a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -33,6 +33,7 @@ #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> #include <net/pkt_cls.h> +#include <net/pkt_sched.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> #include <net/udp_tunnel.h> @@ -176,7 +177,7 @@ enum i40e_interrupt_policy { struct i40e_lump_tracking { u16 num_entries; - u16 list[0]; + u16 list[]; #define I40E_PILE_VALID_BIT 0x8000 #define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2) }; @@ -1287,9 +1288,9 @@ void i40e_ptp_stop(struct i40e_pf *pf); int i40e_ptp_alloc_pins(struct i40e_pf *pf); int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset); int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); -i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf); -i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf); -i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf); +int i40e_get_partition_bw_setting(struct i40e_pf *pf); +int i40e_set_partition_bw_setting(struct i40e_pf *pf); +int i40e_commit_partition_bw_setting(struct i40e_pf *pf); void 
i40e_print_link_message(struct i40e_vsi *vsi, bool isup); void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 42439f725aa4..86fac8f959bb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -47,9 +47,9 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw) * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings * @hw: pointer to the hardware structure **/ -static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) +static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) { - i40e_status ret_code; + int ret_code; ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, i40e_mem_atq_ring, @@ -74,9 +74,9 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings * @hw: pointer to the hardware structure **/ -static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) +static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) { - i40e_status ret_code; + int ret_code; ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, i40e_mem_arq_ring, @@ -115,11 +115,11 @@ static void i40e_free_adminq_arq(struct i40e_hw *hw) * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue * @hw: pointer to the hardware structure **/ -static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) +static int i40e_alloc_arq_bufs(struct i40e_hw *hw) { - i40e_status ret_code; struct i40e_aq_desc *desc; struct i40e_dma_mem *bi; + int ret_code; int i; /* We'll be allocating the buffer info memory first, then we can @@ -182,10 +182,10 @@ unwind_alloc_arq_bufs: * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue * @hw: pointer to the hardware structure **/ -static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw) +static int i40e_alloc_asq_bufs(struct i40e_hw *hw) { - i40e_status ret_code; struct i40e_dma_mem *bi; + int ret_code; int i; /* No mapped memory needed yet, just the buffer info structures */ @@ -266,9 +266,9 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw) * * Configure base address and length registers for the transmit queue **/ -static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) +static int i40e_config_asq_regs(struct i40e_hw *hw) { - i40e_status ret_code = 0; + int ret_code = 0; u32 reg = 0; /* Clear Head and Tail */ @@ -295,9 +295,9 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) * * Configure base address and length registers for the receive (event queue) **/ -static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) +static int i40e_config_arq_regs(struct i40e_hw *hw) { - i40e_status ret_code = 0; + int ret_code = 0; u32 reg = 0; /* Clear Head and Tail */ @@ -334,9 +334,9 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe **/ -static i40e_status i40e_init_asq(struct i40e_hw *hw) +static int i40e_init_asq(struct i40e_hw *hw) { - i40e_status ret_code = 0; + int ret_code = 0; if (hw->aq.asq.count > 0) { /* queue already initialized */ @@ -393,9 +393,9 @@ init_adminq_exit: * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe **/ -static i40e_status i40e_init_arq(struct i40e_hw *hw) +static int i40e_init_arq(struct i40e_hw *hw) { - i40e_status ret_code = 0; + int 
ret_code = 0; if (hw->aq.arq.count > 0) { /* queue already initialized */ @@ -445,9 +445,9 @@ init_adminq_exit: * * The main shutdown routine for the Admin Send Queue **/ -static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) +static int i40e_shutdown_asq(struct i40e_hw *hw) { - i40e_status ret_code = 0; + int ret_code = 0; mutex_lock(&hw->aq.asq_mutex); @@ -479,9 +479,9 @@ shutdown_asq_out: * * The main shutdown routine for the Admin Receive Queue **/ -static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) +static int i40e_shutdown_arq(struct i40e_hw *hw) { - i40e_status ret_code = 0; + int ret_code = 0; mutex_lock(&hw->aq.arq_mutex); @@ -582,12 +582,12 @@ static void i40e_set_hw_flags(struct i40e_hw *hw) * - hw->aq.arq_buf_size * - hw->aq.asq_buf_size **/ -i40e_status i40e_init_adminq(struct i40e_hw *hw) +int i40e_init_adminq(struct i40e_hw *hw) { u16 cfg_ptr, oem_hi, oem_lo; u16 eetrack_lo, eetrack_hi; - i40e_status ret_code; int retry = 0; + int ret_code; /* verify input for valid configuration */ if ((hw->aq.num_arq_entries == 0) || @@ -780,7 +780,7 @@ static bool i40e_asq_done(struct i40e_hw *hw) * This is the main send command driver routine for the Admin Queue send * queue. It runs the queue, cleans the queue, etc **/ -static i40e_status +static int i40e_asq_send_command_atomic_exec(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ @@ -788,12 +788,12 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details, bool is_atomic_context) { - i40e_status status = 0; struct i40e_dma_mem *dma_buff = NULL; struct i40e_asq_cmd_details *details; struct i40e_aq_desc *desc_on_ring; bool cmd_completed = false; u16 retval = 0; + int status = 0; u32 val = 0; if (hw->aq.asq.count == 0) { @@ -984,7 +984,7 @@ asq_send_command_error: * Acquires the lock and calls the main send command execution * routine. **/ -i40e_status +int i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ @@ -992,7 +992,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details, bool is_atomic_context) { - i40e_status status; + int status; mutex_lock(&hw->aq.asq_mutex); status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size, @@ -1003,7 +1003,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw, return status; } -i40e_status +int i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details) @@ -1026,7 +1026,7 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc, * routine. Returns the last Admin Queue status in aq_status * to avoid race conditions in access to hw->aq.asq_last_status. **/ -i40e_status +int i40e_asq_send_command_atomic_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ @@ -1035,7 +1035,7 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw, bool is_atomic_context, enum i40e_admin_queue_err *aq_status) { - i40e_status status; + int status; mutex_lock(&hw->aq.asq_mutex); status = i40e_asq_send_command_atomic_exec(hw, desc, buff, @@ -1048,7 +1048,7 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw, return status; } -i40e_status +int i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details, @@ -1084,14 +1084,14 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, * the contents through e. 
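These adminq hunks are part of a tree-wide conversion in this series: the driver-private i40e_status typedef becomes plain int. A small sketch of why the conversion is mechanical, with illustrative names and values that are not the driver's: both schemes use 0 for success and nonzero for failure, so call sites that only test `if (ret_code)` compile and behave identically.

#include <stdio.h>

/* old scheme:  typedef enum my_status_code my_status;
 * new scheme:  plain int, 0 == success, nonzero == failure
 */
enum my_err {
    MY_OK          = 0,
    MY_ERR_PARAM   = -5,    /* illustrative driver-private codes */
    MY_ERR_TIMEOUT = -54,
};

/* was:  static my_status alloc_ring(int fail)  */
static int alloc_ring(int fail)
{
    return fail ? MY_ERR_TIMEOUT : MY_OK;
}

int main(void)
{
    int ret_code = alloc_ring(1);

    if (ret_code)    /* unchanged call-site idiom */
        printf("alloc failed: %d\n", ret_code);
    return 0;
}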
It can also return how many events are * left to process through 'pending' **/ -i40e_status i40e_clean_arq_element(struct i40e_hw *hw, - struct i40e_arq_event_info *e, - u16 *pending) +int i40e_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *pending) { - i40e_status ret_code = 0; u16 ntc = hw->aq.arq.next_to_clean; struct i40e_aq_desc *desc; struct i40e_dma_mem *bi; + int ret_code = 0; u16 desc_idx; u16 datalen; u16 flags; diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h index cb8689222c8b..a6c9a9e343d1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h @@ -20,16 +20,16 @@ enum i40e_memory_type { }; /* prototype for functions used for dynamic memory allocation */ -i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw, - struct i40e_dma_mem *mem, - enum i40e_memory_type type, - u64 size, u32 alignment); -i40e_status i40e_free_dma_mem(struct i40e_hw *hw, - struct i40e_dma_mem *mem); -i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw, - struct i40e_virt_mem *mem, - u32 size); -i40e_status i40e_free_virt_mem(struct i40e_hw *hw, - struct i40e_virt_mem *mem); +int i40e_allocate_dma_mem(struct i40e_hw *hw, + struct i40e_dma_mem *mem, + enum i40e_memory_type type, + u64 size, u32 alignment); +int i40e_free_dma_mem(struct i40e_hw *hw, + struct i40e_dma_mem *mem); +int i40e_allocate_virt_mem(struct i40e_hw *hw, + struct i40e_virt_mem *mem, + u32 size); +int i40e_free_virt_mem(struct i40e_hw *hw, + struct i40e_virt_mem *mem); #endif /* _I40E_ALLOC_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 10d7a982a5b9..639c5a1ca853 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -541,9 +541,9 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev, { struct i40e_pf *pf = ldev->pf; struct i40e_hw *hw = &pf->hw; - i40e_status err; + int err; - err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP, + err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_RDMA, 0, msg, len, NULL); if (err) dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n", @@ -674,7 +674,7 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev, struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; bool update = true; - i40e_status err; + int err; /* TODO: for now do not allow setting VF's VSI setting */ if (is_vf) @@ -686,8 +686,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev, ctxt.flags = I40E_AQ_VSI_TYPE_PF; if (err) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi config, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, err), + "couldn't get PF vsi config, err %pe aq_err %s\n", + ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -ENOENT; @@ -714,8 +714,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev, err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (err) { dev_info(&pf->pdev->dev, - "update VSI ctxt for PE failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, err), + "update VSI ctxt for PE failed, err %pe aq_err %s\n", + ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 8f764ff5c990..ed88e38d488b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ 
b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -14,9 +14,9 @@ * This function sets the mac type of the adapter based on the * vendor ID and device ID stored in the hw structure. **/ -i40e_status i40e_set_mac_type(struct i40e_hw *hw) +int i40e_set_mac_type(struct i40e_hw *hw) { - i40e_status status = 0; + int status = 0; if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { switch (hw->device_id) { @@ -125,154 +125,6 @@ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) } /** - * i40e_stat_str - convert status err code to a string - * @hw: pointer to the HW structure - * @stat_err: the status error code to convert - **/ -const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err) -{ - switch (stat_err) { - case 0: - return "OK"; - case I40E_ERR_NVM: - return "I40E_ERR_NVM"; - case I40E_ERR_NVM_CHECKSUM: - return "I40E_ERR_NVM_CHECKSUM"; - case I40E_ERR_PHY: - return "I40E_ERR_PHY"; - case I40E_ERR_CONFIG: - return "I40E_ERR_CONFIG"; - case I40E_ERR_PARAM: - return "I40E_ERR_PARAM"; - case I40E_ERR_MAC_TYPE: - return "I40E_ERR_MAC_TYPE"; - case I40E_ERR_UNKNOWN_PHY: - return "I40E_ERR_UNKNOWN_PHY"; - case I40E_ERR_LINK_SETUP: - return "I40E_ERR_LINK_SETUP"; - case I40E_ERR_ADAPTER_STOPPED: - return "I40E_ERR_ADAPTER_STOPPED"; - case I40E_ERR_INVALID_MAC_ADDR: - return "I40E_ERR_INVALID_MAC_ADDR"; - case I40E_ERR_DEVICE_NOT_SUPPORTED: - return "I40E_ERR_DEVICE_NOT_SUPPORTED"; - case I40E_ERR_PRIMARY_REQUESTS_PENDING: - return "I40E_ERR_PRIMARY_REQUESTS_PENDING"; - case I40E_ERR_INVALID_LINK_SETTINGS: - return "I40E_ERR_INVALID_LINK_SETTINGS"; - case I40E_ERR_AUTONEG_NOT_COMPLETE: - return "I40E_ERR_AUTONEG_NOT_COMPLETE"; - case I40E_ERR_RESET_FAILED: - return "I40E_ERR_RESET_FAILED"; - case I40E_ERR_SWFW_SYNC: - return "I40E_ERR_SWFW_SYNC"; - case I40E_ERR_NO_AVAILABLE_VSI: - return "I40E_ERR_NO_AVAILABLE_VSI"; - case I40E_ERR_NO_MEMORY: - return "I40E_ERR_NO_MEMORY"; - case I40E_ERR_BAD_PTR: - return "I40E_ERR_BAD_PTR"; - case I40E_ERR_RING_FULL: - return "I40E_ERR_RING_FULL"; - case I40E_ERR_INVALID_PD_ID: - return "I40E_ERR_INVALID_PD_ID"; - case I40E_ERR_INVALID_QP_ID: - return "I40E_ERR_INVALID_QP_ID"; - case I40E_ERR_INVALID_CQ_ID: - return "I40E_ERR_INVALID_CQ_ID"; - case I40E_ERR_INVALID_CEQ_ID: - return "I40E_ERR_INVALID_CEQ_ID"; - case I40E_ERR_INVALID_AEQ_ID: - return "I40E_ERR_INVALID_AEQ_ID"; - case I40E_ERR_INVALID_SIZE: - return "I40E_ERR_INVALID_SIZE"; - case I40E_ERR_INVALID_ARP_INDEX: - return "I40E_ERR_INVALID_ARP_INDEX"; - case I40E_ERR_INVALID_FPM_FUNC_ID: - return "I40E_ERR_INVALID_FPM_FUNC_ID"; - case I40E_ERR_QP_INVALID_MSG_SIZE: - return "I40E_ERR_QP_INVALID_MSG_SIZE"; - case I40E_ERR_QP_TOOMANY_WRS_POSTED: - return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; - case I40E_ERR_INVALID_FRAG_COUNT: - return "I40E_ERR_INVALID_FRAG_COUNT"; - case I40E_ERR_QUEUE_EMPTY: - return "I40E_ERR_QUEUE_EMPTY"; - case I40E_ERR_INVALID_ALIGNMENT: - return "I40E_ERR_INVALID_ALIGNMENT"; - case I40E_ERR_FLUSHED_QUEUE: - return "I40E_ERR_FLUSHED_QUEUE"; - case I40E_ERR_INVALID_PUSH_PAGE_INDEX: - return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; - case I40E_ERR_INVALID_IMM_DATA_SIZE: - return "I40E_ERR_INVALID_IMM_DATA_SIZE"; - case I40E_ERR_TIMEOUT: - return "I40E_ERR_TIMEOUT"; - case I40E_ERR_OPCODE_MISMATCH: - return "I40E_ERR_OPCODE_MISMATCH"; - case I40E_ERR_CQP_COMPL_ERROR: - return "I40E_ERR_CQP_COMPL_ERROR"; - case I40E_ERR_INVALID_VF_ID: - return "I40E_ERR_INVALID_VF_ID"; - case I40E_ERR_INVALID_HMCFN_ID: - return "I40E_ERR_INVALID_HMCFN_ID"; - case 
I40E_ERR_BACKING_PAGE_ERROR: - return "I40E_ERR_BACKING_PAGE_ERROR"; - case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: - return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; - case I40E_ERR_INVALID_PBLE_INDEX: - return "I40E_ERR_INVALID_PBLE_INDEX"; - case I40E_ERR_INVALID_SD_INDEX: - return "I40E_ERR_INVALID_SD_INDEX"; - case I40E_ERR_INVALID_PAGE_DESC_INDEX: - return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; - case I40E_ERR_INVALID_SD_TYPE: - return "I40E_ERR_INVALID_SD_TYPE"; - case I40E_ERR_MEMCPY_FAILED: - return "I40E_ERR_MEMCPY_FAILED"; - case I40E_ERR_INVALID_HMC_OBJ_INDEX: - return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; - case I40E_ERR_INVALID_HMC_OBJ_COUNT: - return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; - case I40E_ERR_INVALID_SRQ_ARM_LIMIT: - return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; - case I40E_ERR_SRQ_ENABLED: - return "I40E_ERR_SRQ_ENABLED"; - case I40E_ERR_ADMIN_QUEUE_ERROR: - return "I40E_ERR_ADMIN_QUEUE_ERROR"; - case I40E_ERR_ADMIN_QUEUE_TIMEOUT: - return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; - case I40E_ERR_BUF_TOO_SHORT: - return "I40E_ERR_BUF_TOO_SHORT"; - case I40E_ERR_ADMIN_QUEUE_FULL: - return "I40E_ERR_ADMIN_QUEUE_FULL"; - case I40E_ERR_ADMIN_QUEUE_NO_WORK: - return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; - case I40E_ERR_BAD_IWARP_CQE: - return "I40E_ERR_BAD_IWARP_CQE"; - case I40E_ERR_NVM_BLANK_MODE: - return "I40E_ERR_NVM_BLANK_MODE"; - case I40E_ERR_NOT_IMPLEMENTED: - return "I40E_ERR_NOT_IMPLEMENTED"; - case I40E_ERR_PE_DOORBELL_NOT_ENABLED: - return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; - case I40E_ERR_DIAG_TEST_FAILED: - return "I40E_ERR_DIAG_TEST_FAILED"; - case I40E_ERR_NOT_READY: - return "I40E_ERR_NOT_READY"; - case I40E_NOT_SUPPORTED: - return "I40E_NOT_SUPPORTED"; - case I40E_ERR_FIRMWARE_API_VERSION: - return "I40E_ERR_FIRMWARE_API_VERSION"; - case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR: - return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; - } - - snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); - return hw->err_str; -} - -/** * i40e_debug_aq * @hw: debug mask related to admin queue * @mask: debug mask @@ -355,13 +207,13 @@ bool i40e_check_asq_alive(struct i40e_hw *hw) * Tell the Firmware that we're shutting down the AdminQ and whether * or not the driver is unloading as well. 
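The large i40e_stat_str() lookup table removed above becomes unnecessary because the series switches log statements to the %pe printk specifier with ERR_PTR(), as the i40e_client.c hunk earlier shows: %pe decodes a known errno wrapped in an error pointer to its symbolic name and otherwise prints the pointer value. A userspace stand-in for that decode step, using strerror():

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    int err = -ENOENT;    /* an errno-style failure code */

    /* kernel form (from the i40e_client.c hunk):
     *   dev_info(dev, "... err %pe aq_err %s\n", ERR_PTR(err), ...);
     * userspace stand-in for the symbolic decode:
     */
    printf("update VSI ctxt for PE failed, err %d (%s)\n",
           err, strerror(-err));
    return 0;
}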
**/ -i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, - bool unloading) +int i40e_aq_queue_shutdown(struct i40e_hw *hw, + bool unloading) { struct i40e_aq_desc desc; struct i40e_aqc_queue_shutdown *cmd = (struct i40e_aqc_queue_shutdown *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown); @@ -384,15 +236,15 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, * * Internal function to get or set RSS look up table **/ -static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, - u16 vsi_id, bool pf_lut, - u8 *lut, u16 lut_size, - bool set) +static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw, + u16 vsi_id, bool pf_lut, + u8 *lut, u16 lut_size, + bool set) { - i40e_status status; struct i40e_aq_desc desc; struct i40e_aqc_get_set_rss_lut *cmd_resp = (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; + int status; if (set) i40e_fill_default_direct_cmd_desc(&desc, @@ -437,8 +289,8 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, * * get the RSS lookup table, PF or VSI type **/ -i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, - bool pf_lut, u8 *lut, u16 lut_size) +int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) { return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, false); @@ -454,8 +306,8 @@ i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, * * set the RSS lookup table, PF or VSI type **/ -i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, - bool pf_lut, u8 *lut, u16 lut_size) +int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) { return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); } @@ -469,16 +321,16 @@ i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, * * get the RSS key per VSI **/ -static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, - u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key, - bool set) +static int i40e_aq_get_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key, + bool set) { - i40e_status status; struct i40e_aq_desc desc; struct i40e_aqc_get_set_rss_key *cmd_resp = (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); + int status; if (set) i40e_fill_default_direct_cmd_desc(&desc, @@ -509,9 +361,9 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, * @key: pointer to key info struct * **/ -i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw, - u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key) +int i40e_aq_get_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) { return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); } @@ -524,9 +376,9 @@ i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw, * * set the RSS key per VSI **/ -i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, - u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key) +int i40e_aq_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) { return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); } @@ -796,10 +648,10 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = { * hw_addr, back, device_id, vendor_id, subsystem_device_id, * subsystem_vendor_id, and revision_id **/ -i40e_status i40e_init_shared_code(struct i40e_hw *hw) +int i40e_init_shared_code(struct i40e_hw *hw) { - i40e_status status = 0; u32 port, ari, func_rid; + int 
status = 0; i40e_set_mac_type(hw); @@ -836,15 +688,16 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw) * @addrs: the requestor's mac addr store * @cmd_details: pointer to command details structure or NULL **/ -static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw, - u16 *flags, - struct i40e_aqc_mac_address_read_data *addrs, - struct i40e_asq_cmd_details *cmd_details) +static int +i40e_aq_mac_address_read(struct i40e_hw *hw, + u16 *flags, + struct i40e_aqc_mac_address_read_data *addrs, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_mac_address_read *cmd_data = (struct i40e_aqc_mac_address_read *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read); desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF); @@ -863,14 +716,14 @@ static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw, * @mac_addr: address to write * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw, - u16 flags, u8 *mac_addr, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_mac_address_write(struct i40e_hw *hw, + u16 flags, u8 *mac_addr, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_mac_address_write *cmd_data = (struct i40e_aqc_mac_address_write *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_write); @@ -893,11 +746,11 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw, * * Reads the adapter's MAC address from register **/ -i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) +int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) { struct i40e_aqc_mac_address_read_data addrs; - i40e_status status; u16 flags = 0; + int status; status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); @@ -914,11 +767,11 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) * * Reads the adapter's Port MAC address **/ -i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) +int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) { struct i40e_aqc_mac_address_read_data addrs; - i40e_status status; u16 flags = 0; + int status; status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); if (status) @@ -972,13 +825,13 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) * * Reads the part number string from the EEPROM. 
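The RSS LUT and RSS key code above shares one shape: a single static worker takes a bool set, and the exported get/set entry points just pin that flag. A compressed standalone sketch of the pattern; the types and names here are illustrative, not the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hw_ctx {
    uint8_t lut[128];    /* stands in for the device's RSS table */
};

static int rss_lut_op(struct hw_ctx *hw, uint8_t *lut, size_t len, bool set)
{
    if (!hw || !lut || len > sizeof(hw->lut))
        return -1;                    /* parameter error */
    if (set)
        memcpy(hw->lut, lut, len);    /* "set" direction */
    else
        memcpy(lut, hw->lut, len);    /* "get" direction */
    return 0;
}

int rss_get_lut(struct hw_ctx *hw, uint8_t *lut, size_t len)
{
    return rss_lut_op(hw, lut, len, false);
}

int rss_set_lut(struct hw_ctx *hw, uint8_t *lut, size_t len)
{
    return rss_lut_op(hw, lut, len, true);
}

int main(void)
{
    struct hw_ctx hw = {0};
    uint8_t lut[4] = {1, 2, 3, 4};

    rss_set_lut(&hw, lut, sizeof(lut));
    rss_get_lut(&hw, lut, sizeof(lut));
    printf("lut[0]=%u\n", lut[0]);
    return 0;
}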
**/ -i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, - u32 pba_num_size) +int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, + u32 pba_num_size) { - i40e_status status = 0; u16 pba_word = 0; u16 pba_size = 0; u16 pba_ptr = 0; + int status = 0; u16 i = 0; status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word); @@ -1087,8 +940,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) * @hw: pointer to the hardware structure * @retry_limit: how many times to retry before failure **/ -static i40e_status i40e_poll_globr(struct i40e_hw *hw, - u32 retry_limit) +static int i40e_poll_globr(struct i40e_hw *hw, + u32 retry_limit) { u32 cnt, reg = 0; @@ -1114,7 +967,7 @@ static i40e_status i40e_poll_globr(struct i40e_hw *hw, * Assuming someone else has triggered a global reset, * assure the global reset is complete and then reset the PF **/ -i40e_status i40e_pf_reset(struct i40e_hw *hw) +int i40e_pf_reset(struct i40e_hw *hw) { u32 cnt = 0; u32 cnt1 = 0; @@ -1453,15 +1306,16 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) * * Returns the various PHY abilities supported on the Port. **/ -i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, - bool qualified_modules, bool report_init, - struct i40e_aq_get_phy_abilities_resp *abilities, - struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_get_phy_capabilities(struct i40e_hw *hw, + bool qualified_modules, bool report_init, + struct i40e_aq_get_phy_abilities_resp *abilities, + struct i40e_asq_cmd_details *cmd_details) { - struct i40e_aq_desc desc; - i40e_status status; u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp); u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0; + struct i40e_aq_desc desc; + int status; if (!abilities) return I40E_ERR_PARAM; @@ -1532,14 +1386,14 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, * of the PHY Config parameters. This status will be indicated by the * command response. **/ -enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, - struct i40e_aq_set_phy_config *config, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_phy_config(struct i40e_hw *hw, + struct i40e_aq_set_phy_config *config, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aq_set_phy_config *cmd = (struct i40e_aq_set_phy_config *)&desc.params.raw; - enum i40e_status_code status; + int status; if (!config) return I40E_ERR_PARAM; @@ -1554,7 +1408,7 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, return status; } -static noinline_for_stack enum i40e_status_code +static noinline_for_stack int i40e_set_fc_status(struct i40e_hw *hw, struct i40e_aq_get_phy_abilities_resp *abilities, bool atomic_restart) @@ -1612,11 +1466,11 @@ i40e_set_fc_status(struct i40e_hw *hw, * * Set the requested flow control mode using set_phy_config. 
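Almost every function in i40e_common.c follows the same admin-queue shape: fill a default descriptor for an opcode, overlay a per-opcode command struct on the descriptor's raw parameter bytes, then send it. A standalone sketch of that overlay idiom; the names, sizes, and opcode below are invented, and note the kernel builds with -fno-strict-aliasing, which this kind of cast relies on.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct aq_desc {
    uint16_t flags;
    uint16_t opcode;
    uint8_t  raw[16];    /* parameter area, reinterpreted per opcode */
};

struct cmd_queue_shutdown {
    uint32_t driver_unloading;    /* bit 0 = "driver is unloading" */
    uint8_t  reserved[12];
};

static void fill_default_direct_cmd_desc(struct aq_desc *desc, uint16_t opc)
{
    memset(desc, 0, sizeof(*desc));
    desc->opcode = opc;
}

int main(void)
{
    struct aq_desc desc;
    /* overlay a typed view on the raw parameter bytes, as the driver
     * does with (struct i40e_aqc_... *)&desc.params.raw */
    struct cmd_queue_shutdown *cmd =
        (struct cmd_queue_shutdown *)&desc.raw;

    fill_default_direct_cmd_desc(&desc, 0x0003 /* invented opcode */);
    cmd->driver_unloading = 1;

    printf("opcode=0x%04x unloading=%u\n", desc.opcode,
           cmd->driver_unloading);
    return 0;
}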
**/ -enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, - bool atomic_restart) +int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, + bool atomic_restart) { struct i40e_aq_get_phy_abilities_resp abilities; - enum i40e_status_code status; + int status; *aq_failures = 0x0; @@ -1655,13 +1509,13 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, * * Tell the firmware that the driver is taking over from PXE **/ -i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_clear_pxe_mode(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) { - i40e_status status; struct i40e_aq_desc desc; struct i40e_aqc_clear_pxe *cmd = (struct i40e_aqc_clear_pxe *)&desc.params.raw; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_clear_pxe_mode); @@ -1683,14 +1537,14 @@ i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, * * Sets up the link and restarts the Auto-Negotiation over the link. **/ -i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, - bool enable_link, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_link_restart_an(struct i40e_hw *hw, + bool enable_link, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_link_restart_an *cmd = (struct i40e_aqc_set_link_restart_an *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_link_restart_an); @@ -1715,17 +1569,17 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, * * Returns the link status of the adapter. **/ -i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, - bool enable_lse, struct i40e_link_status *link, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_link_info(struct i40e_hw *hw, + bool enable_lse, struct i40e_link_status *link, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_link_status *resp = (struct i40e_aqc_get_link_status *)&desc.params.raw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; - i40e_status status; bool tx_pause, rx_pause; u16 command_flags; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); @@ -1811,14 +1665,14 @@ aq_get_link_info_exit: * * Set link interrupt mask. **/ -i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, - u16 mask, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_phy_int_mask(struct i40e_hw *hw, + u16 mask, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_phy_int_mask *cmd = (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_int_mask); @@ -1838,8 +1692,8 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, * * Enable/disable loopback on a given port */ -i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_lb_mode *cmd = @@ -1864,13 +1718,13 @@ i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk, * * Reset the external PHY. 
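i40e_set_fc() above drives several admin-queue calls in sequence and records which stage failed in the *aq_failures bitmask, so callers get both an overall status and a per-step diagnosis. A standalone sketch of that convention; the bit names and the simulated steps are invented.

#include <stdint.h>
#include <stdio.h>

#define FC_FAIL_GET     (1u << 0)    /* get_phy_capabilities failed */
#define FC_FAIL_SET     (1u << 1)    /* set_phy_config failed */
#define FC_FAIL_UPDATE  (1u << 2)    /* update_link_info failed */

static int step(int fail) { return fail ? -1 : 0; }

static int set_fc(uint8_t *aq_failures)
{
    int status;

    *aq_failures = 0;

    status = step(0);    /* read current PHY abilities */
    if (status)
        *aq_failures |= FC_FAIL_GET;

    status = step(1);    /* write new flow-control config */
    if (status)
        *aq_failures |= FC_FAIL_SET;

    return status;
}

int main(void)
{
    uint8_t fails;
    int ret = set_fc(&fails);

    printf("ret=%d failures=0x%x\n", ret, fails);
    return 0;
}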
**/ -i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_phy_debug *cmd = (struct i40e_aqc_set_phy_debug *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_debug); @@ -1905,9 +1759,9 @@ static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj, * * Add a VSI context to the hardware. **/ -i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, - struct i40e_vsi_context *vsi_ctx, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_add_vsi(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = @@ -1915,7 +1769,7 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vsi); @@ -1949,15 +1803,15 @@ aq_add_vsi_exit: * @seid: vsi number * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, - u16 seid, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_default_vsi(struct i40e_hw *hw, + u16 seid, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *) &desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); @@ -1977,15 +1831,15 @@ i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, * @seid: vsi number * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, - u16 seid, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_clear_default_vsi(struct i40e_hw *hw, + u16 seid, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *) &desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); @@ -2007,16 +1861,16 @@ i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, * @cmd_details: pointer to command details structure or NULL * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc **/ -i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, - u16 seid, bool set, - struct i40e_asq_cmd_details *cmd_details, - bool rx_only_promisc) +int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, + u16 seid, bool set, + struct i40e_asq_cmd_details *cmd_details, + bool rx_only_promisc) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; - i40e_status status; u16 flags = 0; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); @@ -2047,14 +1901,15 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, * @set: set multicast promiscuous enable/disable * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, - u16 seid, bool set, 
struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, + u16 seid, bool set, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; - i40e_status status; u16 flags = 0; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); @@ -2080,16 +1935,16 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag * @cmd_details: pointer to command details structure or NULL **/ -enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, - u16 seid, bool enable, - u16 vid, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, + u16 vid, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; - enum i40e_status_code status; u16 flags = 0; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); @@ -2116,16 +1971,16 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag * @cmd_details: pointer to command details structure or NULL **/ -enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, - u16 seid, bool enable, - u16 vid, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, + u16 vid, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; - enum i40e_status_code status; u16 flags = 0; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); @@ -2158,15 +2013,15 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, - u16 seid, bool enable, u16 vid, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, u16 vid, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; - i40e_status status; u16 flags = 0; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); @@ -2193,14 +2048,14 @@ i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, * * Set or clear the broadcast promiscuous flag (filter) for a given VSI. 
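These i40e_aqc_set_vsi_promiscuous_modes commands all use a two-field convention: one field carries the desired bit values and a companion valid-flags mask tells firmware which bits to apply, so one command can flip unicast promiscuity without clobbering the multicast or broadcast settings. A tiny sketch with invented bit names:

#include <stdint.h>
#include <stdio.h>

#define PROMISC_UNICAST    (1u << 0)
#define PROMISC_MULTICAST  (1u << 1)

struct promisc_cmd {
    uint16_t flags;          /* desired values */
    uint16_t valid_flags;    /* which bits firmware should apply */
};

static void set_unicast_promisc(struct promisc_cmd *cmd, int enable)
{
    cmd->valid_flags |= PROMISC_UNICAST;    /* touch only this bit */
    if (enable)
        cmd->flags |= PROMISC_UNICAST;
}

int main(void)
{
    struct promisc_cmd cmd = {0};

    set_unicast_promisc(&cmd, 1);
    printf("flags=0x%x valid=0x%x\n", cmd.flags, cmd.valid_flags);
    return 0;
}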
**/ -i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, - u16 seid, bool set_filter, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, + u16 seid, bool set_filter, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); @@ -2226,15 +2081,15 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, - u16 seid, bool enable, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, + u16 seid, bool enable, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; - i40e_status status; u16 flags = 0; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); @@ -2256,9 +2111,9 @@ i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, - struct i40e_vsi_context *vsi_ctx, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = @@ -2266,7 +2121,7 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_vsi_parameters); @@ -2298,9 +2153,9 @@ aq_get_vsi_params_exit: * * Update a VSI context. 
**/ -i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, - struct i40e_vsi_context *vsi_ctx, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_update_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = @@ -2308,7 +2163,7 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_vsi_parameters); @@ -2336,15 +2191,15 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, * * Fill the buf with switch configuration returned from AdminQ command **/ -i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, - struct i40e_aqc_get_switch_config_resp *buf, - u16 buf_size, u16 *start_seid, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_switch_config(struct i40e_hw *hw, + struct i40e_aqc_get_switch_config_resp *buf, + u16 buf_size, u16 *start_seid, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_switch_seid *scfg = (struct i40e_aqc_switch_seid *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_switch_config); @@ -2370,15 +2225,15 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, * * Set switch configuration bits **/ -enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, - u16 flags, - u16 valid_flags, u8 mode, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_switch_config(struct i40e_hw *hw, + u16 flags, + u16 valid_flags, u8 mode, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_switch_config *scfg = (struct i40e_aqc_set_switch_config *)&desc.params.raw; - enum i40e_status_code status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_switch_config); @@ -2407,16 +2262,16 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, * * Get the firmware version from the admin queue commands **/ -i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, - u16 *fw_major_version, u16 *fw_minor_version, - u32 *fw_build, - u16 *api_major_version, u16 *api_minor_version, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_firmware_version(struct i40e_hw *hw, + u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, + u16 *api_major_version, u16 *api_minor_version, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_version *resp = (struct i40e_aqc_get_version *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); @@ -2446,14 +2301,14 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, * * Send the driver version to the firmware **/ -i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, +int i40e_aq_send_driver_version(struct i40e_hw *hw, struct i40e_driver_version *dv, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_driver_version *cmd = (struct i40e_aqc_driver_version *)&desc.params.raw; - i40e_status status; + int status; u16 len; if (dv == NULL) @@ -2488,9 +2343,9 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, * * Side effect: LinkStatusEvent reporting becomes enabled **/ -i40e_status i40e_get_link_status(struct i40e_hw 
*hw, bool *link_up) +int i40e_get_link_status(struct i40e_hw *hw, bool *link_up) { - i40e_status status = 0; + int status = 0; if (hw->phy.get_link_info) { status = i40e_update_link_info(hw); @@ -2509,10 +2364,10 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) * i40e_update_link_info - update status of the HW network link * @hw: pointer to the hw struct **/ -noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw) +noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw) { struct i40e_aq_get_phy_abilities_resp abilities; - i40e_status status = 0; + int status = 0; status = i40e_aq_get_link_info(hw, true, NULL, NULL); if (status) @@ -2559,19 +2414,19 @@ noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw) * This asks the FW to add a VEB between the uplink and downlink * elements. If the uplink SEID is 0, this will be a floating VEB. **/ -i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, - u16 downlink_seid, u8 enabled_tc, - bool default_port, u16 *veb_seid, - bool enable_stats, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, + u16 downlink_seid, u8 enabled_tc, + bool default_port, u16 *veb_seid, + bool enable_stats, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_veb *cmd = (struct i40e_aqc_add_veb *)&desc.params.raw; struct i40e_aqc_add_veb_completion *resp = (struct i40e_aqc_add_veb_completion *)&desc.params.raw; - i40e_status status; u16 veb_flags = 0; + int status; /* SEIDs need to either both be set or both be 0 for floating VEB */ if (!!uplink_seid != !!downlink_seid) @@ -2617,17 +2472,17 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, * This retrieves the parameters for a particular VEB, specified by * uplink_seid, and returns them to the caller. **/ -i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, - u16 veb_seid, u16 *switch_id, - bool *floating, u16 *statistic_index, - u16 *vebs_used, u16 *vebs_free, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_veb_parameters(struct i40e_hw *hw, + u16 veb_seid, u16 *switch_id, + bool *floating, u16 *statistic_index, + u16 *vebs_used, u16 *vebs_free, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_veb_parameters_completion *cmd_resp = (struct i40e_aqc_get_veb_parameters_completion *) &desc.params.raw; - i40e_status status; + int status; if (veb_seid == 0) return I40E_ERR_PARAM; @@ -2711,7 +2566,7 @@ i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list, * * Add MAC/VLAN addresses to the HW filtering **/ -i40e_status +int i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details) @@ -2743,7 +2598,7 @@ i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, * It also calls _v2 versions of asq_send_command functions to * get the aq_status on the stack. 
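i40e_get_link_status() above answers from a cached copy unless hw->phy.get_link_info requests a refresh from firmware first. A minimal standalone sketch of that refresh-on-demand pattern, with illustrative names and a stubbed firmware query:

#include <stdbool.h>
#include <stdio.h>

struct phy_state {
    bool refresh_needed;    /* ~ hw->phy.get_link_info */
    bool cached_link_up;    /* ~ hw->phy.link_info */
};

static int query_firmware(struct phy_state *phy)
{
    phy->cached_link_up = true;    /* pretend firmware reported link up */
    return 0;
}

static int get_link_status(struct phy_state *phy, bool *link_up)
{
    int status = 0;

    if (phy->refresh_needed) {
        status = query_firmware(phy);
        if (status)
            return status;
        phy->refresh_needed = false;
    }
    *link_up = phy->cached_link_up;
    return 0;
}

int main(void)
{
    struct phy_state phy = { .refresh_needed = true };
    bool up = false;

    get_link_status(&phy, &up);
    printf("link_up=%d\n", up);
    return 0;
}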
**/ -i40e_status +int i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details, @@ -2771,15 +2626,16 @@ i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid, * * Remove MAC/VLAN addresses from the HW filtering **/ -i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, - struct i40e_aqc_remove_macvlan_element_data *mv_list, - u16 count, struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_remove_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_macvlan *cmd = (struct i40e_aqc_macvlan *)&desc.params.raw; - i40e_status status; u16 buf_size; + int status; if (count == 0 || !mv_list || !hw) return I40E_ERR_PARAM; @@ -2818,7 +2674,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, * It also calls _v2 versions of asq_send_command functions to * get the aq_status on the stack. **/ -i40e_status +int i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid, struct i40e_aqc_remove_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details, @@ -2866,19 +2722,19 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid, * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for * VEBs/VEPA elements only **/ -static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, - u16 opcode, u16 sw_seid, u16 rule_type, u16 id, - u16 count, __le16 *mr_list, - struct i40e_asq_cmd_details *cmd_details, - u16 *rule_id, u16 *rules_used, u16 *rules_free) +static int i40e_mirrorrule_op(struct i40e_hw *hw, + u16 opcode, u16 sw_seid, u16 rule_type, u16 id, + u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free) { struct i40e_aq_desc desc; struct i40e_aqc_add_delete_mirror_rule *cmd = (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; struct i40e_aqc_add_delete_mirror_rule_completion *resp = (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; - i40e_status status; u16 buf_size; + int status; buf_size = count * sizeof(*mr_list); @@ -2926,10 +2782,11 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, * * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only **/ -i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, - u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, - struct i40e_asq_cmd_details *cmd_details, - u16 *rule_id, u16 *rules_used, u16 *rules_free) +int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 dest_vsi, u16 count, + __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free) { if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { @@ -2957,10 +2814,11 @@ i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, * * Delete a mirror rule. 
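The _v2 macvlan helpers exist, per the comment above, to return the admin-queue status "on the stack": reading it back from the shared hw->aq.asq_last_status after the mutex is dropped can race with the next command. A sketch of the out-parameter fix, with illustrative types and a simulated status value:

#include <stdio.h>

struct aq {
    int last_status;    /* shared; may be overwritten by other commands */
};

static int send_cmd_v2(struct aq *aq, int simulated_status, int *aq_status)
{
    aq->last_status = simulated_status;
    if (aq_status)
        *aq_status = simulated_status;    /* caller's private copy */
    return simulated_status ? -1 : 0;
}

int main(void)
{
    struct aq aq = {0};
    int aq_status;

    send_cmd_v2(&aq, 7 /* invented AQ error code */, &aq_status);
    /* aq.last_status could change under us; aq_status cannot */
    printf("private aq_status=%d\n", aq_status);
    return 0;
}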
Mirror rules are supported for VEBs/VEPA elements only **/ -i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, - u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, - struct i40e_asq_cmd_details *cmd_details, - u16 *rules_used, u16 *rules_free) +int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 rule_id, u16 count, + __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rules_used, u16 *rules_free) { /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { @@ -2989,14 +2847,14 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, * * send msg to vf **/ -i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, - u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, + u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_pf_vf_message *cmd = (struct i40e_aqc_pf_vf_message *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); cmd->id = cpu_to_le32(vfid); @@ -3024,14 +2882,14 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, * * Read the register using the admin queue commands **/ -i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, +int i40e_aq_debug_read_register(struct i40e_hw *hw, u32 reg_addr, u64 *reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_reg_read_write *cmd_resp = (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; - i40e_status status; + int status; if (reg_val == NULL) return I40E_ERR_PARAM; @@ -3059,14 +2917,14 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, * * Write to a register using the admin queue commands **/ -i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, - u32 reg_addr, u64 reg_val, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_debug_write_register(struct i40e_hw *hw, + u32 reg_addr, u64 reg_val, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_reg_read_write *cmd = (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); @@ -3090,16 +2948,16 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, * * requests common resource using the admin queue commands **/ -i40e_status i40e_aq_request_resource(struct i40e_hw *hw, - enum i40e_aq_resources_ids resource, - enum i40e_aq_resource_access_type access, - u8 sdp_number, u64 *timeout, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_request_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + enum i40e_aq_resource_access_type access, + u8 sdp_number, u64 *timeout, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_request_resource *cmd_resp = (struct i40e_aqc_request_resource *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); @@ -3129,15 +2987,15 @@ i40e_status i40e_aq_request_resource(struct i40e_hw *hw, * * release common resource using the admin queue commands **/ -i40e_status i40e_aq_release_resource(struct i40e_hw *hw, - enum i40e_aq_resources_ids resource, - u8 sdp_number, - struct 
i40e_asq_cmd_details *cmd_details) +int i40e_aq_release_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + u8 sdp_number, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_request_resource *cmd = (struct i40e_aqc_request_resource *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); @@ -3161,15 +3019,15 @@ i40e_status i40e_aq_release_resource(struct i40e_hw *hw, * * Read the NVM using the admin queue commands **/ -i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, - u32 offset, u16 length, void *data, - bool last_command, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; - i40e_status status; + int status; /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) { @@ -3207,14 +3065,14 @@ i40e_aq_read_nvm_exit: * * Erase the NVM sector using the admin queue commands **/ -i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, - u32 offset, u16 length, bool last_command, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, bool last_command, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; - i40e_status status; + int status; /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) { @@ -3255,8 +3113,8 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, u32 number, logical_id, phys_id; struct i40e_hw_capabilities *p; u16 id, ocp_cfg_word0; - i40e_status status; u8 major_rev; + int status; u32 i = 0; cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; @@ -3497,14 +3355,14 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, * * Get the device capabilities descriptions from the firmware **/ -i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, - void *buff, u16 buff_size, u16 *data_size, - enum i40e_admin_queue_opc list_type_opc, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_discover_capabilities(struct i40e_hw *hw, + void *buff, u16 buff_size, u16 *data_size, + enum i40e_admin_queue_opc list_type_opc, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aqc_list_capabilites *cmd; struct i40e_aq_desc desc; - i40e_status status = 0; + int status = 0; cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; @@ -3546,15 +3404,15 @@ exit: * * Update the NVM using the admin queue commands **/ -i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, - u32 offset, u16 length, void *data, - bool last_command, u8 preservation_flags, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, u8 preservation_flags, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_nvm_update *cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; - i40e_status status; + int status; /* In offset the highest byte must be zeroed. 
*/ if (offset & 0xFF000000) { @@ -3599,13 +3457,13 @@ i40e_aq_update_nvm_exit: * * Rearrange NVM structure, available only for transition FW **/ -i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, - u8 rearrange_nvm, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_rearrange_nvm(struct i40e_hw *hw, + u8 rearrange_nvm, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aqc_nvm_update *cmd; - i40e_status status; struct i40e_aq_desc desc; + int status; cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; @@ -3639,17 +3497,17 @@ i40e_aq_rearrange_nvm_exit: * * Requests the complete LLDP MIB (entire packet). **/ -i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, - u8 mib_type, void *buff, u16 buff_size, - u16 *local_len, u16 *remote_len, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, + u8 mib_type, void *buff, u16 buff_size, + u16 *local_len, u16 *remote_len, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_get_mib *cmd = (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; struct i40e_aqc_lldp_get_mib *resp = (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; - i40e_status status; + int status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; @@ -3689,14 +3547,14 @@ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, * * Set the LLDP MIB. **/ -enum i40e_status_code +int i40e_aq_set_lldp_mib(struct i40e_hw *hw, u8 mib_type, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aqc_lldp_set_local_mib *cmd; - enum i40e_status_code status; struct i40e_aq_desc desc; + int status; cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw; if (buff_size == 0 || !buff) @@ -3728,14 +3586,14 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw, * Enable or Disable posting of an event on ARQ when LLDP MIB * associated with the interface changes **/ -i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, - bool enable_update, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, + bool enable_update, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_update_mib *cmd = (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); @@ -3757,14 +3615,14 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, * Restore LLDP Agent factory settings if @restore set to True. In other case * only returns factory setting in AQ response. 
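The NVM read, update, and erase helpers above all share the 24-bit offset guard visible in these hunks. A sketch of just that check, with the helper name illustrative:

static int example_check_nvm_offset(u32 offset)
{
	/* AQ NVM commands carry a 24-bit offset; the highest byte
	 * must be zeroed or the request is rejected up front.
	 */
	if (offset & 0xFF000000)
		return I40E_ERR_PARAM;
	return 0;
}
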
**/ -enum i40e_status_code +int i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_restore *cmd = (struct i40e_aqc_lldp_restore *)&desc.params.raw; - i40e_status status; + int status; if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) { i40e_debug(hw, I40E_DEBUG_ALL, @@ -3794,14 +3652,14 @@ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, * * Stop or Shutdown the embedded LLDP Agent **/ -i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, - bool persist, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, + bool persist, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_stop *cmd = (struct i40e_aqc_lldp_stop *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); @@ -3829,13 +3687,13 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, * * Start the embedded LLDP Agent on all ports. **/ -i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_lldp_start *cmd = (struct i40e_aqc_lldp_start *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); @@ -3861,14 +3719,14 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, * @dcb_enable: True if DCB configuration needs to be applied * **/ -enum i40e_status_code +int i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_dcb_parameters *cmd = (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; - i40e_status status; + int status; if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) return I40E_ERR_DEVICE_NOT_SUPPORTED; @@ -3894,12 +3752,12 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, * * Get CEE DCBX mode operational configuration from firmware **/ -i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, - void *buff, u16 buff_size, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; - i40e_status status; + int status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; @@ -3925,17 +3783,17 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, * and this function will call cpu_to_le16 to convert from Host byte order to * Little Endian order. 
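Several LLDP commands in these hunks are gated on firmware capability bits in hw->flags before any descriptor is built. A minimal sketch, assuming firmware that advertises persistent LLDP support; the wrapper name is illustrative, i40e_aq_stop_lldp() is from the diff:

static int example_stop_lldp_persistently(struct i40e_hw *hw)
{
	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT))
		return I40E_ERR_DEVICE_NOT_SUPPORTED;

	/* shutdown_agent = true, persist across reloads = true */
	return i40e_aq_stop_lldp(hw, true, true, NULL);
}
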
**/ -i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, - u16 udp_port, u8 protocol_index, - u8 *filter_index, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_add_udp_tunnel(struct i40e_hw *hw, + u16 udp_port, u8 protocol_index, + u8 *filter_index, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_udp_tunnel *cmd = (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; struct i40e_aqc_del_udp_tunnel_completion *resp = (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); @@ -3956,13 +3814,13 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, * @index: filter index * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_remove_udp_tunnel *cmd = (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); @@ -3981,13 +3839,13 @@ i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, * * This deletes a switch element from the switch. **/ -i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_switch_seid *cmd = (struct i40e_aqc_switch_seid *)&desc.params.raw; - i40e_status status; + int status; if (seid == 0) return I40E_ERR_PARAM; @@ -4011,11 +3869,11 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, * recomputed and modified. The retval field in the descriptor * will be set to 0 when RPB is modified. 
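i40e_aq_delete_element() above is typical of the direct (bufferless) commands: validate the SEID, fill a default descriptor, send it, and return the int status. A sketch of a caller, with the function name illustrative:

static int example_delete_veb(struct i40e_hw *hw, u16 veb_seid)
{
	/* SEID 0 is rejected by the helper with I40E_ERR_PARAM */
	if (!veb_seid)
		return I40E_ERR_PARAM;

	return i40e_aq_delete_element(hw, veb_seid, NULL);
}
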
**/ -i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_dcb_updated(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); @@ -4035,15 +3893,15 @@ i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, * * Generic command handler for Tx scheduler AQ commands **/ -static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, +static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, void *buff, u16 buff_size, - enum i40e_admin_queue_opc opcode, + enum i40e_admin_queue_opc opcode, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_tx_sched_ind *cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; - i40e_status status; + int status; bool cmd_param_flag = false; switch (opcode) { @@ -4093,14 +3951,14 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, * @max_credit: Max BW limit credits * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, +int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, u16 seid, u16 credit, u8 max_credit, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_configure_vsi_bw_limit *cmd = (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_configure_vsi_bw_limit); @@ -4121,10 +3979,10 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_configure_vsi_tc_bw, @@ -4139,11 +3997,12 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, * @opcode: Tx scheduler AQ command opcode * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_configure_switching_comp_ets_data *ets_data, - enum i40e_admin_queue_opc opcode, - struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_ets_data *ets_data, + enum i40e_admin_queue_opc opcode, + struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, sizeof(*ets_data), opcode, cmd_details); @@ -4156,7 +4015,8 @@ i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, +int +i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, struct i40e_asq_cmd_details *cmd_details) @@ -4173,10 +4033,11 @@ i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, * @bw_data: Buffer to hold VSI BW 
configuration * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_query_vsi_bw_config_resp *bw_data, - struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_vsi_bw_config, @@ -4190,10 +4051,11 @@ i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, * @bw_data: Buffer to hold VSI BW configuration per TC * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, - struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_vsi_ets_sla_config, @@ -4207,10 +4069,11 @@ i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, * @bw_data: Buffer to hold switching component's per TC BW config * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, - struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_switching_comp_ets_config, @@ -4224,10 +4087,11 @@ i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, * @bw_data: Buffer to hold current ETS configuration for the Physical Port * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_query_port_ets_config_resp *bw_data, - struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_query_port_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_port_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_port_ets_config, @@ -4241,10 +4105,11 @@ i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, * @bw_data: Buffer to hold switching component's BW configuration * @cmd_details: pointer to command details structure or NULL **/ -i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, - struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) { return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), i40e_aqc_opc_query_switching_comp_bw_config, @@ -4263,8 +4128,9 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, * Returns 0 if the values passed are valid and within * range else returns an error. 
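All of the BW/ETS query and config wrappers above reduce to one call into the static i40e_aq_tx_sched_cmd() dispatcher with a per-command opcode and buffer. A hypothetical extra wrapper inside i40e_common.c would follow the same shape:

static int example_query_vsi_bw(struct i40e_hw *hw, u16 seid,
				struct i40e_aqc_query_vsi_bw_config_resp *bw_data)
{
	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data,
				    sizeof(*bw_data),
				    i40e_aqc_opc_query_vsi_bw_config, NULL);
}
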
**/ -static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, - struct i40e_filter_control_settings *settings) +static int +i40e_validate_filter_settings(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings) { u32 fcoe_cntx_size, fcoe_filt_size; u32 fcoe_fmax; @@ -4350,11 +4216,11 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, * for a single PF. It is expected that these settings are programmed * at the driver initialization time. **/ -i40e_status i40e_set_filter_control(struct i40e_hw *hw, - struct i40e_filter_control_settings *settings) +int i40e_set_filter_control(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings) { - i40e_status ret = 0; u32 hash_lut_size = 0; + int ret = 0; u32 val; if (!settings) @@ -4424,11 +4290,11 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw, * In return it will update the total number of perfect filter count in * the stats member. **/ -i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, - u8 *mac_addr, u16 ethtype, u16 flags, - u16 vsi_seid, u16 queue, bool is_add, - struct i40e_control_filter_stats *stats, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, + u8 *mac_addr, u16 ethtype, u16 flags, + u16 vsi_seid, u16 queue, bool is_add, + struct i40e_control_filter_stats *stats, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_add_remove_control_packet_filter *cmd = @@ -4437,7 +4303,7 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, struct i40e_aqc_add_remove_control_packet_filter_completion *resp = (struct i40e_aqc_add_remove_control_packet_filter_completion *) &desc.params.raw; - i40e_status status; + int status; if (vsi_seid == 0) return I40E_ERR_PARAM; @@ -4483,7 +4349,7 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; - i40e_status status; + int status; status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, seid, 0, true, NULL, @@ -4505,14 +4371,14 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, * is not passed then only register at 'reg_addr0' is read. 
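i40e_add_filter_to_drop_tx_flow_control_frames() above shows how the control-packet filter helper is used: installing a drop rule for outgoing PAUSE frames. A condensed sketch of that call; the full helper may set additional flag bits not visible in this hunk:

static void example_drop_tx_pause(struct i40e_hw *hw, u16 seid)
{
	u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
	int status;

	status = i40e_aq_add_rem_control_packet_filter(hw, NULL,
			I40E_FLOW_CONTROL_ETHTYPE, flag, seid,
			0 /* queue */, true /* is_add */, NULL, NULL);
	if (status)
		hw_dbg(hw, "Ethtype filter add failed: %d\n", status);
}
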
* **/ -static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, - u32 reg_addr0, u32 *reg_val0, - u32 reg_addr1, u32 *reg_val1) +static int i40e_aq_alternate_read(struct i40e_hw *hw, + u32 reg_addr0, u32 *reg_val0, + u32 reg_addr1, u32 *reg_val1) { struct i40e_aq_desc desc; struct i40e_aqc_alternate_write *cmd_resp = (struct i40e_aqc_alternate_write *)&desc.params.raw; - i40e_status status; + int status; if (!reg_val0) return I40E_ERR_PARAM; @@ -4541,12 +4407,12 @@ static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, * * Suspend port's Tx traffic **/ -i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aqc_tx_sched_ind *cmd; struct i40e_aq_desc desc; - i40e_status status; + int status; cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx); @@ -4563,11 +4429,11 @@ i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid, * * Resume port's Tx traffic **/ -i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_resume_port_tx(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); @@ -4637,18 +4503,18 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) * Dump internal FW/HW data for debug purposes. * **/ -i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, - u8 table_id, u32 start_index, u16 buff_size, - void *buff, u16 *ret_buff_size, - u8 *ret_next_table, u32 *ret_next_index, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, + u8 table_id, u32 start_index, u16 buff_size, + void *buff, u16 *ret_buff_size, + u8 *ret_next_table, u32 *ret_next_index, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_debug_dump_internals *cmd = (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; struct i40e_aqc_debug_dump_internals *resp = (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; - i40e_status status; + int status; if (buff_size == 0 || !buff) return I40E_ERR_PARAM; @@ -4689,12 +4555,12 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, * * Read bw from the alternate ram for the given pf **/ -i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, - u32 *max_bw, u32 *min_bw, - bool *min_valid, bool *max_valid) +int i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, + bool *min_valid, bool *max_valid) { - i40e_status status; u32 max_bw_addr, min_bw_addr; + int status; /* Calculate the address of the min/max bw registers */ max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + @@ -4729,13 +4595,14 @@ i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, * * Configure partitions guaranteed/max bw **/ -i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, - struct i40e_aqc_configure_partition_bw_data *bw_data, - struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) { - i40e_status status; - struct i40e_aq_desc desc; u16 bwd_size = sizeof(*bw_data); + struct i40e_aq_desc desc; + int status; 
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_configure_partition_bw); @@ -4764,11 +4631,11 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, * * Reads specified PHY register value **/ -i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, - u16 reg, u8 phy_addr, u16 *value) +int i40e_read_phy_register_clause22(struct i40e_hw *hw, + u16 reg, u8 phy_addr, u16 *value) { - i40e_status status = I40E_ERR_TIMEOUT; u8 port_num = (u8)hw->func_caps.mdio_port_num; + int status = I40E_ERR_TIMEOUT; u32 command = 0; u16 retry = 1000; @@ -4809,11 +4676,11 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, * * Writes specified PHY register value **/ -i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, - u16 reg, u8 phy_addr, u16 value) +int i40e_write_phy_register_clause22(struct i40e_hw *hw, + u16 reg, u8 phy_addr, u16 value) { - i40e_status status = I40E_ERR_TIMEOUT; u8 port_num = (u8)hw->func_caps.mdio_port_num; + int status = I40E_ERR_TIMEOUT; u32 command = 0; u16 retry = 1000; @@ -4850,13 +4717,13 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, * * Reads specified PHY register value **/ -i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, - u8 page, u16 reg, u8 phy_addr, u16 *value) +int i40e_read_phy_register_clause45(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, u16 *value) { - i40e_status status = I40E_ERR_TIMEOUT; + u8 port_num = hw->func_caps.mdio_port_num; + int status = I40E_ERR_TIMEOUT; u32 command = 0; u16 retry = 1000; - u8 port_num = hw->func_caps.mdio_port_num; command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | @@ -4924,13 +4791,13 @@ phy_read_end: * * Writes value to specified PHY register **/ -i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, - u8 page, u16 reg, u8 phy_addr, u16 value) +int i40e_write_phy_register_clause45(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, u16 value) { - i40e_status status = I40E_ERR_TIMEOUT; - u32 command = 0; - u16 retry = 1000; u8 port_num = hw->func_caps.mdio_port_num; + int status = I40E_ERR_TIMEOUT; + u16 retry = 1000; + u32 command = 0; command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | @@ -4991,10 +4858,10 @@ phy_write_end: * * Writes value to specified PHY register **/ -i40e_status i40e_write_phy_register(struct i40e_hw *hw, - u8 page, u16 reg, u8 phy_addr, u16 value) +int i40e_write_phy_register(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, u16 value) { - i40e_status status; + int status; switch (hw->device_id) { case I40E_DEV_ID_1G_BASE_T_X722: @@ -5030,10 +4897,10 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, * * Reads specified PHY register value **/ -i40e_status i40e_read_phy_register(struct i40e_hw *hw, - u8 page, u16 reg, u8 phy_addr, u16 *value) +int i40e_read_phy_register(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, u16 *value) { - i40e_status status; + int status; switch (hw->device_id) { case I40E_DEV_ID_1G_BASE_T_X722: @@ -5082,17 +4949,17 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) * * Blinks PHY link LED **/ -i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, - u32 time, u32 interval) +int i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval) { - i40e_status status = 0; - u32 i; - u16 led_ctl; - u16 gpio_led_port; - u16 led_reg; u16 led_addr = I40E_PHY_LED_PROV_REG_1; + u16 gpio_led_port; u8 phy_addr = 0; + int status = 0; + u16 led_ctl; u8 port_num; + u16 led_reg; + u32 
i; i = rd32(hw, I40E_PFGEN_PORTNUM); port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); @@ -5154,12 +5021,12 @@ phy_blinking_end: * @led_addr: LED register address * @reg_val: read register value **/ -static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, - u32 *reg_val) +static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, + u32 *reg_val) { - enum i40e_status_code status; u8 phy_addr = 0; u8 port_num; + int status; u32 i; *reg_val = 0; @@ -5188,12 +5055,12 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, * @led_addr: LED register address * @reg_val: register value to write **/ -static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, - u32 reg_val) +static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, + u32 reg_val) { - enum i40e_status_code status; u8 phy_addr = 0; u8 port_num; + int status; u32 i; if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { @@ -5223,17 +5090,17 @@ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, * @val: original value of register to use * **/ -i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, - u16 *val) +int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, + u16 *val) { - i40e_status status = 0; u16 gpio_led_port; u8 phy_addr = 0; - u16 reg_val; + u32 reg_val_aq; + int status = 0; u16 temp_addr; + u16 reg_val; u8 port_num; u32 i; - u32 reg_val_aq; if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { status = @@ -5278,12 +5145,12 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, * Set led's on or off when controlled by the PHY * **/ -i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, - u16 led_addr, u32 mode) +int i40e_led_set_phy(struct i40e_hw *hw, bool on, + u16 led_addr, u32 mode) { - i40e_status status = 0; u32 led_ctl = 0; u32 led_reg = 0; + int status = 0; status = i40e_led_get_reg(hw, led_addr, &led_reg); if (status) @@ -5327,14 +5194,14 @@ restore_config: * Use the firmware to read the Rx control register, * especially useful if the Rx unit is under heavy pressure **/ -i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; - i40e_status status; + int status; if (!reg_val) return I40E_ERR_PARAM; @@ -5358,8 +5225,8 @@ i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, **/ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) { - i40e_status status = 0; bool use_register; + int status = 0; int retry = 5; u32 val = 0; @@ -5393,14 +5260,14 @@ do_retry: * Use the firmware to write to an Rx control register, * especially useful if the Rx unit is under heavy pressure **/ -i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_rx_ctl_reg_read_write *cmd = (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); @@ -5420,8 +5287,8 @@ i40e_status i40e_aq_rx_ctl_write_register(struct 
i40e_hw *hw, **/ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) { - i40e_status status = 0; bool use_register; + int status = 0; int retry = 5; use_register = (((hw->aq.api_maj_ver == 1) && @@ -5483,16 +5350,16 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio, * NOTE: In common cases MDIO I/F number should not be changed, thats why you * may use simple wrapper i40e_aq_set_phy_register. **/ -enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, bool page_change, - bool set_mdio, u8 mdio_num, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_phy_register_ext(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, bool page_change, + bool set_mdio, u8 mdio_num, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_phy_register_access *cmd = (struct i40e_aqc_phy_register_access *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_register); @@ -5528,16 +5395,16 @@ enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw, * NOTE: In common cases MDIO I/F number should not be changed, thats why you * may use simple wrapper i40e_aq_get_phy_register. **/ -enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, bool page_change, - bool set_mdio, u8 mdio_num, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_phy_register_ext(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, bool page_change, + bool set_mdio, u8 mdio_num, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_phy_register_access *cmd = (struct i40e_aqc_phy_register_access *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_phy_register); @@ -5568,18 +5435,17 @@ enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw, * @error_info: returns error information * @cmd_details: pointer to command details structure or NULL **/ -enum -i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, - u16 buff_size, u32 track_id, - u32 *error_offset, u32 *error_info, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, + u16 buff_size, u32 track_id, + u32 *error_offset, u32 *error_info, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_write_personalization_profile *cmd = (struct i40e_aqc_write_personalization_profile *) &desc.params.raw; struct i40e_aqc_write_ddp_resp *resp; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_write_personalization_profile); @@ -5612,15 +5478,14 @@ i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, * @flags: AdminQ command flags * @cmd_details: pointer to command details structure or NULL **/ -enum -i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, - u16 buff_size, u8 flags, - struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, + u16 buff_size, u8 flags, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_applied_profiles *cmd = (struct i40e_aqc_get_applied_profiles *)&desc.params.raw; - i40e_status status; + int status; i40e_fill_default_direct_cmd_desc(&desc, 
i40e_aqc_opc_get_personalization_profile_list); @@ -5719,14 +5584,13 @@ i40e_find_section_in_profile(u32 section_type, * @hw: pointer to the hw struct * @aq: command buffer containing all data to execute AQ **/ -static enum -i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw, - struct i40e_profile_aq_section *aq) +static int i40e_ddp_exec_aq_section(struct i40e_hw *hw, + struct i40e_profile_aq_section *aq) { - i40e_status status; struct i40e_aq_desc desc; u8 *msg = NULL; u16 msglen; + int status; i40e_fill_default_direct_cmd_desc(&desc, aq->opcode); desc.flags |= cpu_to_le16(aq->flags); @@ -5766,14 +5630,14 @@ i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw, * * Validates supported devices and profile's sections. */ -static enum i40e_status_code +static int i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, u32 track_id, bool rollback) { struct i40e_profile_section_header *sec = NULL; - i40e_status status = 0; struct i40e_section_table *sec_tbl; u32 vendor_dev_id; + int status = 0; u32 dev_cnt; u32 sec_off; u32 i; @@ -5831,16 +5695,16 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, * * Handles the download of a complete package. */ -enum i40e_status_code +int i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, u32 track_id) { - i40e_status status = 0; - struct i40e_section_table *sec_tbl; struct i40e_profile_section_header *sec = NULL; struct i40e_profile_aq_section *ddp_aq; - u32 section_size = 0; + struct i40e_section_table *sec_tbl; u32 offset = 0, info = 0; + u32 section_size = 0; + int status = 0; u32 sec_off; u32 i; @@ -5894,15 +5758,15 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, * * Rolls back previously loaded package. */ -enum i40e_status_code +int i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, u32 track_id) { struct i40e_profile_section_header *sec = NULL; - i40e_status status = 0; struct i40e_section_table *sec_tbl; u32 offset = 0, info = 0; u32 section_size = 0; + int status = 0; u32 sec_off; int i; @@ -5946,15 +5810,15 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, * * Register a profile to the list of loaded profiles. */ -enum i40e_status_code +int i40e_add_pinfo_to_list(struct i40e_hw *hw, struct i40e_profile_segment *profile, u8 *profile_info_sec, u32 track_id) { - i40e_status status = 0; struct i40e_profile_section_header *sec = NULL; struct i40e_profile_info *pinfo; u32 offset = 0, info = 0; + int status = 0; sec = (struct i40e_profile_section_header *)profile_info_sec; sec->tbl_size = 1; @@ -5988,7 +5852,7 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw, * of the function. * **/ -enum i40e_status_code +int i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_data *filters, u8 filter_count) @@ -5996,8 +5860,8 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid, struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; - enum i40e_status_code status; u16 buff_len; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_cloud_filters); @@ -6025,7 +5889,7 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid, * function. 
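The cloud-filter wrappers in these hunks size the indirect buffer as count times the element size and attach it to the add/remove opcode; callers now only see an int. A minimal sketch for a single filter, with the caller name illustrative:

static int example_add_one_cloud_filter(struct i40e_hw *hw, u16 seid,
					struct i40e_aqc_cloud_filters_element_data *f)
{
	int status = i40e_aq_add_cloud_filters(hw, seid, f, 1);

	if (status)
		return status;	/* AQ detail in hw->aq.asq_last_status */
	return 0;
}
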
* **/ -enum i40e_status_code +int i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_bb *filters, u8 filter_count) @@ -6033,8 +5897,8 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; - i40e_status status; u16 buff_len; + int status; int i; i40e_fill_default_direct_cmd_desc(&desc, @@ -6082,7 +5946,7 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, * of the function. * **/ -enum i40e_status_code +int i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_data *filters, u8 filter_count) @@ -6090,8 +5954,8 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid, struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; - enum i40e_status_code status; u16 buff_len; + int status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_cloud_filters); @@ -6119,7 +5983,7 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid, * function. * **/ -enum i40e_status_code +int i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_bb *filters, u8 filter_count) @@ -6127,8 +5991,8 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; - i40e_status status; u16 buff_len; + int status; int i; i40e_fill_default_direct_cmd_desc(&desc, diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 673f341f4c0c..90638b67f8dc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -12,7 +12,7 @@ * * Get the DCBX status from the Firmware **/ -i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) +int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) { u32 reg; @@ -497,15 +497,15 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv, * * Parse DCB configuration from the LLDPDU **/ -i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, - struct i40e_dcbx_config *dcbcfg) +int i40e_lldp_to_dcb_config(u8 *lldpmib, + struct i40e_dcbx_config *dcbcfg) { - i40e_status ret = 0; struct i40e_lldp_org_tlv *tlv; - u16 type; - u16 length; u16 typelength; u16 offset = 0; + int ret = 0; + u16 length; + u16 type; if (!lldpmib || !dcbcfg) return I40E_ERR_PARAM; @@ -551,12 +551,12 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, * * Query DCB configuration from the Firmware **/ -i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, - u8 bridgetype, - struct i40e_dcbx_config *dcbcfg) +int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, + u8 bridgetype, + struct i40e_dcbx_config *dcbcfg) { - i40e_status ret = 0; struct i40e_virt_mem mem; + int ret = 0; u8 *lldpmib; /* Allocate the LLDPDU */ @@ -767,9 +767,9 @@ static void i40e_cee_to_dcb_config( * * Get IEEE mode DCB configuration from the Firmware **/ -static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw) +static int i40e_get_ieee_dcb_config(struct i40e_hw *hw) { - i40e_status ret = 0; + int ret = 0; /* IEEE mode */ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; @@ -797,11 +797,11 @@ out: * * Get DCB configuration from the Firmware **/ -i40e_status i40e_get_dcb_config(struct i40e_hw *hw) +int 
i40e_get_dcb_config(struct i40e_hw *hw) { - i40e_status ret = 0; - struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg; struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg; + struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg; + int ret = 0; /* If Firmware version < v4.33 on X710/XL710, IEEE only */ if ((hw->mac.type == I40E_MAC_XL710) && @@ -867,11 +867,11 @@ out: * * Update DCB configuration from the Firmware **/ -i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) +int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) { - i40e_status ret = 0; struct i40e_lldp_variables lldp_cfg; u8 adminstatus = 0; + int ret = 0; if (!hw->func_caps.dcb) return I40E_NOT_SUPPORTED; @@ -940,13 +940,13 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) * Get status of FW Link Layer Discovery Protocol (LLDP) Agent. * Status of agent is reported via @lldp_status parameter. **/ -enum i40e_status_code +int i40e_get_fw_lldp_status(struct i40e_hw *hw, enum i40e_get_fw_lldp_status_resp *lldp_status) { struct i40e_virt_mem mem; - i40e_status ret; u8 *lldpmib; + int ret; if (!lldp_status) return I40E_ERR_PARAM; @@ -1238,13 +1238,13 @@ static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv, * * Set DCB configuration to the Firmware **/ -i40e_status i40e_set_dcb_config(struct i40e_hw *hw) +int i40e_set_dcb_config(struct i40e_hw *hw) { struct i40e_dcbx_config *dcbcfg; struct i40e_virt_mem mem; u8 mib_type, *lldpmib; - i40e_status ret; u16 miblen; + int ret; /* update the hw local config */ dcbcfg = &hw->local_dcbx_config; @@ -1274,8 +1274,8 @@ i40e_status i40e_set_dcb_config(struct i40e_hw *hw) * * send DCB configuration to FW **/ -i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, - struct i40e_dcbx_config *dcbcfg) +int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, + struct i40e_dcbx_config *dcbcfg) { u16 length, offset = 0, tlvid, typelength; struct i40e_lldp_org_tlv *tlv; @@ -1888,13 +1888,13 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, * * Reads the LLDP configuration data from NVM using passed addresses **/ -static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw, - struct i40e_lldp_variables *lldp_cfg, - u8 module, u32 word_offset) +static int _i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg, + u8 module, u32 word_offset) { u32 address, offset = (2 * word_offset); - i40e_status ret; __le16 raw_mem; + int ret; u16 mem; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); @@ -1950,10 +1950,10 @@ err_lldp_cfg: * * Reads the LLDP configuration data from NVM **/ -i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, - struct i40e_lldp_variables *lldp_cfg) +int i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg) { - i40e_status ret = 0; + int ret = 0; u32 mem; if (!lldp_cfg) diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index 2370ceecb061..6b60dc9b7736 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -264,20 +264,20 @@ void i40e_dcb_hw_calculate_pool_sizes(struct i40e_hw *hw, void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw, struct i40e_rx_pb_config *old_pb_cfg, struct i40e_rx_pb_config *new_pb_cfg); -i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, - u16 *status); -i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, - struct i40e_dcbx_config *dcbcfg); -i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, - u8 bridgetype, - struct i40e_dcbx_config *dcbcfg); -i40e_status 
i40e_get_dcb_config(struct i40e_hw *hw); -i40e_status i40e_init_dcb(struct i40e_hw *hw, - bool enable_mib_change); -enum i40e_status_code +int i40e_get_dcbx_status(struct i40e_hw *hw, + u16 *status); +int i40e_lldp_to_dcb_config(u8 *lldpmib, + struct i40e_dcbx_config *dcbcfg); +int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, + u8 bridgetype, + struct i40e_dcbx_config *dcbcfg); +int i40e_get_dcb_config(struct i40e_hw *hw); +int i40e_init_dcb(struct i40e_hw *hw, + bool enable_mib_change); +int i40e_get_fw_lldp_status(struct i40e_hw *hw, enum i40e_get_fw_lldp_status_resp *lldp_status); -i40e_status i40e_set_dcb_config(struct i40e_hw *hw); -i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, - struct i40e_dcbx_config *dcbcfg); +int i40e_set_dcb_config(struct i40e_hw *hw); +int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, + struct i40e_dcbx_config *dcbcfg); #endif /* _I40E_DCB_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index e32c61909b31..195421d863ab 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -135,8 +135,8 @@ static int i40e_dcbnl_ieee_setets(struct net_device *netdev, ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg); if (ret) { dev_info(&pf->pdev->dev, - "Failed setting DCB ETS configuration err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "Failed setting DCB ETS configuration err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } @@ -174,8 +174,8 @@ static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev, ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg); if (ret) { dev_info(&pf->pdev->dev, - "Failed setting DCB PFC configuration err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "Failed setting DCB PFC configuration err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } @@ -225,8 +225,8 @@ static int i40e_dcbnl_ieee_setapp(struct net_device *netdev, ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg); if (ret) { dev_info(&pf->pdev->dev, - "Failed setting DCB configuration err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "Failed setting DCB configuration err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } @@ -290,8 +290,8 @@ static int i40e_dcbnl_ieee_delapp(struct net_device *netdev, ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg); if (ret) { dev_info(&pf->pdev->dev, - "Failed setting DCB configuration err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "Failed setting DCB configuration err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c index e1069ae658ad..7e8183762fd9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c @@ -36,7 +36,7 @@ static int i40e_ddp_does_profile_exist(struct i40e_hw *hw, { struct i40e_ddp_profile_list *profile_list; u8 buff[I40E_PROFILE_LIST_SIZE]; - i40e_status status; + int status; int i; status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0, @@ -91,7 +91,7 @@ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw, { struct i40e_ddp_profile_list *profile_list; u8 buff[I40E_PROFILE_LIST_SIZE]; - i40e_status status; + int status; int i; status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0, @@ -117,14 +117,14 @@ static int 
i40e_ddp_does_profile_overlap(struct i40e_hw *hw, * * Register a profile to the list of loaded profiles. */ -static enum i40e_status_code +static int i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile, u8 *profile_info_sec, u32 track_id) { struct i40e_profile_section_header *sec; struct i40e_profile_info *pinfo; - i40e_status status; u32 offset = 0, info = 0; + int status; sec = (struct i40e_profile_section_header *)profile_info_sec; sec->tbl_size = 1; @@ -157,14 +157,14 @@ i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile, * * Removes DDP profile from the NIC. **/ -static enum i40e_status_code +static int i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile, u8 *profile_info_sec, u32 track_id) { struct i40e_profile_section_header *sec; struct i40e_profile_info *pinfo; - i40e_status status; u32 offset = 0, info = 0; + int status; sec = (struct i40e_profile_section_header *)profile_info_sec; sec->tbl_size = 1; @@ -270,12 +270,12 @@ int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size, struct i40e_profile_segment *profile_hdr; struct i40e_profile_info pinfo; struct i40e_package_header *pkg_hdr; - i40e_status status; struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u32 track_id; int istatus; + int status; pkg_hdr = (struct i40e_package_header *)data; if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index c9dcd6d92c83..9954493cd448 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -918,9 +918,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid); i40e_veb_release(pf->veb[i]); } else if (strncmp(cmd_buf, "add pvid", 8) == 0) { - i40e_status ret; - u16 vid; unsigned int v; + int ret; + u16 vid; cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v); if (cnt != 2) { @@ -1284,7 +1284,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) { struct i40e_aq_desc *desc; - i40e_status ret; + int ret; desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL); if (!desc) @@ -1330,9 +1330,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, desc = NULL; } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) { struct i40e_aq_desc *desc; - i40e_status ret; u16 buffer_len; u8 *buff; + int ret; desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL); if (!desc) diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c index ef4d3762bf37..5b3519c6e362 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.c +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -10,8 +10,8 @@ * @reg: reg to be tested * @mask: bits to be touched **/ -static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw, - u32 reg, u32 mask) +static int i40e_diag_reg_pattern_test(struct i40e_hw *hw, + u32 reg, u32 mask) { static const u32 patterns[] = { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF @@ -74,9 +74,9 @@ struct i40e_diag_reg_test_info i40e_reg_list[] = { * * Perform registers diagnostic test **/ -i40e_status i40e_diag_reg_test(struct i40e_hw *hw) +int i40e_diag_reg_test(struct i40e_hw *hw) { - i40e_status ret_code = 0; + int ret_code = 0; u32 reg, mask; u32 i, j; @@ -114,9 +114,9 @@ i40e_status i40e_diag_reg_test(struct 
i40e_hw *hw) * * Perform EEPROM diagnostic test **/ -i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw) +int i40e_diag_eeprom_test(struct i40e_hw *hw) { - i40e_status ret_code; + int ret_code; u16 reg_val; /* read NVM control word and if NVM valid, validate EEPROM checksum*/ diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h index c3340f320a18..e641035c7297 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.h +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h @@ -22,7 +22,7 @@ struct i40e_diag_reg_test_info { extern struct i40e_diag_reg_test_info i40e_reg_list[]; -i40e_status i40e_diag_reg_test(struct i40e_hw *hw); -i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw); +int i40e_diag_reg_test(struct i40e_hw *hw); +int i40e_diag_eeprom_test(struct i40e_hw *hw); #endif /* _I40E_DIAG_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 887a735fe2a7..4934ff58332c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1226,8 +1226,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev, struct i40e_vsi *vsi = np->vsi; struct i40e_hw *hw = &pf->hw; bool autoneg_changed = false; - i40e_status status = 0; int timeout = 50; + int status = 0; int err = 0; __u32 speed; u8 autoneg; @@ -1455,8 +1455,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev, status = i40e_aq_set_phy_config(hw, &config, NULL); if (status) { netdev_info(netdev, - "Set phy config failed, err %s aq_err %s\n", - i40e_stat_str(hw, status), + "Set phy config failed, err %pe aq_err %s\n", + ERR_PTR(status), i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; goto done; @@ -1465,8 +1465,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev, status = i40e_update_link_info(hw); if (status) netdev_dbg(netdev, - "Updating link info failed with err %s aq_err %s\n", - i40e_stat_str(hw, status), + "Updating link info failed with err %pe aq_err %s\n", + ERR_PTR(status), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { @@ -1485,7 +1485,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg) struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; - i40e_status status = 0; + int status = 0; u32 flags = 0; int err = 0; @@ -1517,8 +1517,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg) status = i40e_aq_set_phy_config(hw, &config, NULL); if (status) { netdev_info(netdev, - "Set phy config failed, err %s aq_err %s\n", - i40e_stat_str(hw, status), + "Set phy config failed, err %pe aq_err %s\n", + ERR_PTR(status), i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; goto done; @@ -1531,8 +1531,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg) * (e.g. no physical connection etc.) 
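The string-formatting change repeated through the ethtool and dcbnl hunks replaces the removed i40e_stat_str() lookup with the kernel's %pe specifier: the int status is wrapped in ERR_PTR() so printk decodes recognized error codes by name. Condensed from the hunks above, with the wrapper name illustrative:

static void example_log_aq_failure(struct i40e_pf *pf, int status)
{
	dev_info(&pf->pdev->dev,
		 "Set phy config failed, err %pe aq_err %s\n",
		 ERR_PTR(status),
		 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
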
*/ netdev_dbg(netdev, - "Updating link info failed with err %s aq_err %s\n", - i40e_stat_str(hw, status), + "Updating link info failed with err %pe aq_err %s\n", + ERR_PTR(status), i40e_aq_str(hw, hw->aq.asq_last_status)); } @@ -1547,7 +1547,7 @@ static int i40e_get_fec_param(struct net_device *netdev, struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; - i40e_status status = 0; + int status = 0; int err = 0; u8 fec_cfg; @@ -1634,12 +1634,12 @@ static int i40e_nway_reset(struct net_device *netdev) struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; - i40e_status ret = 0; + int ret = 0; ret = i40e_aq_set_link_restart_an(hw, link_up, NULL); if (ret) { - netdev_info(netdev, "link restart failed, err %s aq_err %s\n", - i40e_stat_str(hw, ret), + netdev_info(netdev, "link restart failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return -EIO; } @@ -1699,9 +1699,9 @@ static int i40e_set_pauseparam(struct net_device *netdev, struct i40e_link_status *hw_link_info = &hw->phy.link_info; struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; - i40e_status status; u8 aq_failures; int err = 0; + int status; u32 is_an; /* Changing the port's flow control is not supported if this isn't the @@ -1755,20 +1755,20 @@ static int i40e_set_pauseparam(struct net_device *netdev, status = i40e_set_fc(hw, &aq_failures, link_up); if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) { - netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n", - i40e_stat_str(hw, status), + netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %pe aq_err %s\n", + ERR_PTR(status), i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; } if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) { - netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n", - i40e_stat_str(hw, status), + netdev_info(netdev, "Set fc failed on the set_phy_config call with err %pe aq_err %s\n", + ERR_PTR(status), i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; } if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) { - netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n", - i40e_stat_str(hw, status), + netdev_info(netdev, "Set fc failed on the get_link_info call with err %pe aq_err %s\n", + ERR_PTR(status), i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; } @@ -2583,8 +2583,8 @@ static u64 i40e_link_test(struct net_device *netdev, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; - i40e_status status; bool link_up = false; + int status; netif_info(pf, hw, netdev, "link test\n"); status = i40e_get_link_status(&pf->hw, &link_up); @@ -2807,11 +2807,11 @@ static int i40e_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct i40e_netdev_priv *np = netdev_priv(netdev); - i40e_status ret = 0; struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; int blink_freq = 2; u16 temp_status; + int ret = 0; switch (state) { case ETHTOOL_ID_ACTIVE: @@ -5247,7 +5247,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u32 reset_needed = 0; - i40e_status status; + int status; u32 i, j; orig_flags = READ_ONCE(pf->flags); @@ -5362,8 +5362,8 @@ flags_complete: 0, NULL); if 
(ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { dev_info(&pf->pdev->dev, - "couldn't set switch config bits, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "couldn't set switch config bits, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* not a fatal problem, just keep going */ @@ -5435,9 +5435,8 @@ flags_complete: return -EBUSY; default: dev_warn(&pf->pdev->dev, - "Starting FW LLDP agent failed: error: %s, %s\n", - i40e_stat_str(&pf->hw, - status), + "Starting FW LLDP agent failed: error: %pe, %s\n", + ERR_PTR(status), i40e_aq_str(&pf->hw, adq_err)); return -EINVAL; @@ -5477,8 +5476,8 @@ static int i40e_get_module_info(struct net_device *netdev, u32 sff8472_comp = 0; u32 sff8472_swap = 0; u32 sff8636_rev = 0; - i40e_status status; u32 type = 0; + int status; /* Check if firmware supports reading module EEPROM. */ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) { @@ -5582,8 +5581,8 @@ static int i40e_get_module_eeprom(struct net_device *netdev, struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; bool is_sfp = false; - i40e_status status; u32 value = 0; + int status; int i; if (!ee || !ee->len || !data) @@ -5624,10 +5623,10 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_aq_get_phy_abilities_resp phy_cfg; - enum i40e_status_code status = 0; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; + int status = 0; /* Get initial PHY capabilities */ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL); @@ -5689,11 +5688,11 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_aq_get_phy_abilities_resp abilities; - enum i40e_status_code status = I40E_SUCCESS; struct i40e_aq_set_phy_config config; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; + int status = I40E_SUCCESS; __le16 eee_capability; /* Deny parameters we don't support */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 163ee8c6311c..46f7950a0049 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -17,17 +17,17 @@ * @type: what type of segment descriptor we're manipulating * @direct_mode_sz: size to alloc in direct mode **/ -i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 sd_index, - enum i40e_sd_entry_type type, - u64 direct_mode_sz) +int i40e_add_sd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 sd_index, + enum i40e_sd_entry_type type, + u64 direct_mode_sz) { enum i40e_memory_type mem_type __attribute__((unused)); struct i40e_hmc_sd_entry *sd_entry; bool dma_mem_alloc_done = false; + int ret_code = I40E_SUCCESS; struct i40e_dma_mem mem; - i40e_status ret_code = I40E_SUCCESS; u64 alloc_len; if (NULL == hmc_info->sd_table.sd_entry) { @@ -106,19 +106,19 @@ exit: * aligned on 4K boundary and zeroed memory. * 2. It should be 4K in size. 
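
The hunks above repeatedly trade the driver-private i40e_stat_str() lookup for the kernel's %pe format specifier. A minimal standalone sketch of that idiom, assuming only core printk behavior (the helper name and device pointer are illustrative, not from this patch):

#include <linux/err.h>
#include <linux/device.h>

/* ERR_PTR() turns a negative error code into an error pointer; %pe
 * prints it as a symbolic name such as "-EIO" when
 * CONFIG_SYMBOLIC_ERRNAME=y, and as a signed decimal otherwise. */
static void report_status(struct device *dev, int status)
{
	if (status)
		dev_info(dev, "operation failed, err %pe\n", ERR_PTR(status));
}
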
**/ -i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 pd_index, - struct i40e_dma_mem *rsrc_pg) +int i40e_add_pd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 pd_index, + struct i40e_dma_mem *rsrc_pg) { - i40e_status ret_code = 0; struct i40e_hmc_pd_table *pd_table; struct i40e_hmc_pd_entry *pd_entry; struct i40e_dma_mem mem; struct i40e_dma_mem *page = &mem; u32 sd_idx, rel_pd_idx; - u64 *pd_addr; + int ret_code = 0; u64 page_desc; + u64 *pd_addr; if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) { ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX; @@ -185,15 +185,15 @@ exit: * 1. Caller can deallocate the memory used by backing storage after this * function returns. **/ -i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx) +int i40e_remove_pd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx) { - i40e_status ret_code = 0; struct i40e_hmc_pd_entry *pd_entry; struct i40e_hmc_pd_table *pd_table; struct i40e_hmc_sd_entry *sd_entry; u32 sd_idx, rel_pd_idx; + int ret_code = 0; u64 *pd_addr; /* calculate index */ @@ -241,11 +241,11 @@ exit: * @hmc_info: pointer to the HMC configuration information structure * @idx: the page index **/ -i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, - u32 idx) +int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, + u32 idx) { - i40e_status ret_code = 0; struct i40e_hmc_sd_entry *sd_entry; + int ret_code = 0; /* get the entry and decrease its ref counter */ sd_entry = &hmc_info->sd_table.sd_entry[idx]; @@ -269,9 +269,9 @@ exit: * @idx: the page index * @is_pf: used to distinguish between VF and PF **/ -i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx, bool is_pf) +int i40e_remove_sd_bp_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf) { struct i40e_hmc_sd_entry *sd_entry; @@ -290,11 +290,11 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, * @hmc_info: pointer to the HMC configuration information structure * @idx: segment descriptor index to find the relevant page descriptor **/ -i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, - u32 idx) +int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, + u32 idx) { - i40e_status ret_code = 0; struct i40e_hmc_sd_entry *sd_entry; + int ret_code = 0; sd_entry = &hmc_info->sd_table.sd_entry[idx]; @@ -318,9 +318,9 @@ exit: * @idx: segment descriptor index to find the relevant page descriptor * @is_pf: used to distinguish between VF and PF **/ -i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx, bool is_pf) +int i40e_remove_pd_page_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf) { struct i40e_hmc_sd_entry *sd_entry; diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h index 3113792afaff..9960da07a573 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -187,28 +187,28 @@ struct i40e_hmc_info { /* add one more to the limit to correct our range */ \ *(pd_limit) += 1; \ } -i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 sd_index, - enum i40e_sd_entry_type type, - u64 direct_mode_sz); - -i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 pd_index, - struct i40e_dma_mem *rsrc_pg); 
-i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx); -i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, - u32 idx); -i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx, bool is_pf); -i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, - u32 idx); -i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx, bool is_pf); + +int i40e_add_sd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 sd_index, + enum i40e_sd_entry_type type, + u64 direct_mode_sz); +int i40e_add_pd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 pd_index, + struct i40e_dma_mem *rsrc_pg); +int i40e_remove_pd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx); +int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, + u32 idx); +int i40e_remove_sd_bp_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); +int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, + u32 idx); +int i40e_remove_pd_page_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); #endif /* _I40E_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index d6e92ecddfbd..40c101f286d1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -74,12 +74,12 @@ static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num, * Assumptions: * - HMC Resource Profile has been selected before calling this function. **/ -i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, - u32 rxq_num, u32 fcoe_cntx_num, - u32 fcoe_filt_num) +int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, + u32 rxq_num, u32 fcoe_cntx_num, + u32 fcoe_filt_num) { struct i40e_hmc_obj_info *obj, *full_obj; - i40e_status ret_code = 0; + int ret_code = 0; u64 l2fpm_size; u32 size_exp; @@ -229,11 +229,11 @@ init_lan_hmc_out: * 1. caller can deallocate the memory used by pd after this function * returns. **/ -static i40e_status i40e_remove_pd_page(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx) +static int i40e_remove_pd_page(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx) { - i40e_status ret_code = 0; + int ret_code = 0; if (!i40e_prep_remove_pd_page(hmc_info, idx)) ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true); @@ -256,11 +256,11 @@ static i40e_status i40e_remove_pd_page(struct i40e_hw *hw, * 1. caller can deallocate the memory used by backing storage after this * function returns. **/ -static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx) +static int i40e_remove_sd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx) { - i40e_status ret_code = 0; + int ret_code = 0; if (!i40e_prep_remove_sd_bp(hmc_info, idx)) ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true); @@ -276,15 +276,15 @@ static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw, * This will allocate memory for PDs and backing pages and populate * the sd and pd entries. 
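
These HMC and LAN-HMC conversions are mechanical: every i40e_status return type becomes a plain int, so firmware codes and kernel errnos share one representation. A hedged sketch of what callers gain, reusing the i40e_aq_rc_to_posix() mapping seen in the ethtool hunks earlier (the wrapped AQ operation is hypothetical):

static int example_caller(struct i40e_hw *hw)
{
	int status = i40e_do_aq_op(hw);	/* hypothetical AQ wrapper */

	/* Translate the firmware admin-queue result into a standard
	 * -errno for the stack, as the converted callers do. */
	if (status)
		return i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	return 0;
}
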
**/ -static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw, - struct i40e_hmc_lan_create_obj_info *info) +static int i40e_create_lan_hmc_object(struct i40e_hw *hw, + struct i40e_hmc_lan_create_obj_info *info) { - i40e_status ret_code = 0; struct i40e_hmc_sd_entry *sd_entry; u32 pd_idx1 = 0, pd_lmt1 = 0; u32 pd_idx = 0, pd_lmt = 0; bool pd_error = false; u32 sd_idx, sd_lmt; + int ret_code = 0; u64 sd_size; u32 i, j; @@ -435,13 +435,13 @@ exit: * - This function will be called after i40e_init_lan_hmc() and before * any LAN/FCoE HMC objects can be created. **/ -i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, - enum i40e_hmc_model model) +int i40e_configure_lan_hmc(struct i40e_hw *hw, + enum i40e_hmc_model model) { struct i40e_hmc_lan_create_obj_info info; - i40e_status ret_code = 0; u8 hmc_fn_id = hw->hmc.hmc_fn_id; struct i40e_hmc_obj_info *obj; + int ret_code = 0; /* Initialize part of the create object info struct */ info.hmc_info = &hw->hmc; @@ -520,13 +520,13 @@ configure_lan_hmc_out: * caller should deallocate memory allocated previously for * book-keeping information about PDs and backing storage. **/ -static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw, - struct i40e_hmc_lan_delete_obj_info *info) +static int i40e_delete_lan_hmc_object(struct i40e_hw *hw, + struct i40e_hmc_lan_delete_obj_info *info) { - i40e_status ret_code = 0; struct i40e_hmc_pd_table *pd_table; u32 pd_idx, pd_lmt, rel_pd_idx; u32 sd_idx, sd_lmt; + int ret_code = 0; u32 i, j; if (NULL == info) { @@ -632,10 +632,10 @@ exit: * This must be called by drivers as they are shutting down and being * removed from the OS. **/ -i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw) +int i40e_shutdown_lan_hmc(struct i40e_hw *hw) { struct i40e_hmc_lan_delete_obj_info info; - i40e_status ret_code; + int ret_code; info.hmc_info = &hw->hmc; info.rsrc_type = I40E_HMC_LAN_FULL; @@ -915,9 +915,9 @@ static void i40e_write_qword(u8 *hmc_bits, * @context_bytes: pointer to the context bit array (DMA memory) * @hmc_type: the type of HMC resource **/ -static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw, - u8 *context_bytes, - enum i40e_hmc_lan_rsrc_type hmc_type) +static int i40e_clear_hmc_context(struct i40e_hw *hw, + u8 *context_bytes, + enum i40e_hmc_lan_rsrc_type hmc_type) { /* clean the bit array */ memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size); @@ -931,9 +931,9 @@ static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw, * @ce_info: a description of the struct to be filled * @dest: the struct to be filled **/ -static i40e_status i40e_set_hmc_context(u8 *context_bytes, - struct i40e_context_ele *ce_info, - u8 *dest) +static int i40e_set_hmc_context(u8 *context_bytes, + struct i40e_context_ele *ce_info, + u8 *dest) { int f; @@ -973,18 +973,18 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes, * base pointer. This function is used for LAN Queue contexts. 
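
Taken together, the converted init/configure/shutdown helpers above form the LAN HMC lifecycle. A rough bring-up sketch using the signatures shown in this patch (the queue counts are illustrative; the driver derives the real ones from its capability data):

static int example_hmc_bringup(struct i40e_hw *hw)
{
	int ret;

	/* Size the HMC for LAN queues; FCoE contexts/filters unused. */
	ret = i40e_init_lan_hmc(hw, 64, 64, 0, 0);
	if (ret)
		return ret;

	/* Commit the chosen layout to hardware. */
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret)
		i40e_shutdown_lan_hmc(hw);
	return ret;
}
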
**/ static -i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base, - enum i40e_hmc_lan_rsrc_type rsrc_type, - u32 obj_idx) +int i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base, + enum i40e_hmc_lan_rsrc_type rsrc_type, + u32 obj_idx) { struct i40e_hmc_info *hmc_info = &hw->hmc; u32 obj_offset_in_sd, obj_offset_in_pd; struct i40e_hmc_sd_entry *sd_entry; struct i40e_hmc_pd_entry *pd_entry; u32 pd_idx, pd_lmt, rel_pd_idx; - i40e_status ret_code = 0; u64 obj_offset_in_fpm; u32 sd_idx, sd_lmt; + int ret_code = 0; if (NULL == hmc_info) { ret_code = I40E_ERR_BAD_PTR; @@ -1042,11 +1042,11 @@ exit: * @hw: the hardware struct * @queue: the queue we care about **/ -i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, - u16 queue) +int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue) { - i40e_status err; u8 *context_bytes; + int err; err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); @@ -1062,12 +1062,12 @@ i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, * @queue: the queue we care about * @s: the struct to be filled **/ -i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, - u16 queue, - struct i40e_hmc_obj_txq *s) +int i40e_set_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s) { - i40e_status err; u8 *context_bytes; + int err; err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); @@ -1083,11 +1083,11 @@ i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, * @hw: the hardware struct * @queue: the queue we care about **/ -i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, - u16 queue) +int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue) { - i40e_status err; u8 *context_bytes; + int err; err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); @@ -1103,12 +1103,12 @@ i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, * @queue: the queue we care about * @s: the struct to be filled **/ -i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, - u16 queue, - struct i40e_hmc_obj_rxq *s) +int i40e_set_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s) { - i40e_status err; u8 *context_bytes; + int err; err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h index c46a2c449e60..9f960404c2b3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h @@ -137,22 +137,22 @@ struct i40e_hmc_lan_delete_obj_info { u32 count; }; -i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, - u32 rxq_num, u32 fcoe_cntx_num, - u32 fcoe_filt_num); -i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, - enum i40e_hmc_model model); -i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw); - -i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, - u16 queue); -i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, - u16 queue, - struct i40e_hmc_obj_txq *s); -i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, - u16 queue); -i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, - u16 queue, - struct i40e_hmc_obj_rxq *s); +int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, + u32 rxq_num, u32 fcoe_cntx_num, + u32 fcoe_filt_num); +int i40e_configure_lan_hmc(struct i40e_hw *hw, + enum i40e_hmc_model model); +int i40e_shutdown_lan_hmc(struct i40e_hw 
*hw); + +int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue); +int i40e_set_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s); +int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue); +int i40e_set_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s); #endif /* _I40E_LAN_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 53d0083e35da..467001db5070 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1817,13 +1817,13 @@ static int i40e_set_mac(struct net_device *netdev, void *p) spin_unlock_bh(&vsi->mac_filter_hash_lock); if (vsi->type == I40E_VSI_MAIN) { - i40e_status ret; + int ret; ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, addr->sa_data, NULL); if (ret) - netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n", - i40e_stat_str(hw, ret), + netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n", + ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } @@ -1854,8 +1854,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw); if (ret) { dev_info(&pf->pdev->dev, - "Cannot set RSS key, err %s aq_err %s\n", - i40e_stat_str(hw, ret), + "Cannot set RSS key, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; } @@ -1866,8 +1866,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); if (ret) { dev_info(&pf->pdev->dev, - "Cannot set RSS lut, err %s aq_err %s\n", - i40e_stat_str(hw, ret), + "Cannot set RSS lut, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; } @@ -2349,7 +2349,7 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name, { struct i40e_hw *hw = &vsi->back->hw; enum i40e_admin_queue_err aq_status; - i40e_status aq_ret; + int aq_ret; aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL, &aq_status); @@ -2358,8 +2358,8 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name, if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) { *retval = -EIO; dev_info(&vsi->back->pdev->dev, - "ignoring delete macvlan error on %s, err %s, aq_err %s\n", - vsi_name, i40e_stat_str(hw, aq_ret), + "ignoring delete macvlan error on %s, err %pe, aq_err %s\n", + vsi_name, ERR_PTR(aq_ret), i40e_aq_str(hw, aq_status)); } } @@ -2423,13 +2423,13 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, * * Returns status indicating success or failure; **/ -static i40e_status +static int i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, struct i40e_mac_filter *f) { bool enable = f->state == I40E_FILTER_NEW; struct i40e_hw *hw = &vsi->back->hw; - i40e_status aq_ret; + int aq_ret; if (f->vlan == I40E_VLAN_ANY) { aq_ret = i40e_aq_set_vsi_broadcast(hw, @@ -2468,7 +2468,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; - i40e_status aq_ret; + int aq_ret; if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB && @@ -2488,8 +2488,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc) NULL); if (aq_ret) { dev_info(&pf->pdev->dev, - "Set default VSI failed, err %s, aq_err %s\n", - 
i40e_stat_str(hw, aq_ret), + "Set default VSI failed, err %pe, aq_err %s\n", + ERR_PTR(aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } } else { @@ -2500,8 +2500,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc) true); if (aq_ret) { dev_info(&pf->pdev->dev, - "set unicast promisc failed, err %s, aq_err %s\n", - i40e_stat_str(hw, aq_ret), + "set unicast promisc failed, err %pe, aq_err %s\n", + ERR_PTR(aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } aq_ret = i40e_aq_set_vsi_multicast_promiscuous( @@ -2510,8 +2510,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc) promisc, NULL); if (aq_ret) { dev_info(&pf->pdev->dev, - "set multicast promisc failed, err %s, aq_err %s\n", - i40e_stat_str(hw, aq_ret), + "set multicast promisc failed, err %pe, aq_err %s\n", + ERR_PTR(aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } } @@ -2541,12 +2541,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) unsigned int vlan_filters = 0; char vsi_name[16] = "PF"; int filter_list_len = 0; - i40e_status aq_ret = 0; u32 changed_flags = 0; struct hlist_node *h; struct i40e_pf *pf; int num_add = 0; int num_del = 0; + int aq_ret = 0; int retval = 0; u16 cmd_flags; int list_size; @@ -2814,9 +2814,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) retval = i40e_aq_rc_to_posix(aq_ret, hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set multi promisc failed on %s, err %s aq_err %s\n", + "set multi promisc failed on %s, err %pe aq_err %s\n", vsi_name, - i40e_stat_str(hw, aq_ret), + ERR_PTR(aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { dev_info(&pf->pdev->dev, "%s allmulti mode.\n", @@ -2834,10 +2834,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) retval = i40e_aq_rc_to_posix(aq_ret, hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "Setting promiscuous %s failed on %s, err %s aq_err %s\n", + "Setting promiscuous %s failed on %s, err %pe aq_err %s\n", cur_promisc ? 
"on" : "off", vsi_name, - i40e_stat_str(hw, aq_ret), + ERR_PTR(aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); } } @@ -2921,7 +2921,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) struct i40e_pf *pf = vsi->back; if (i40e_enabled_xdp_vsi(vsi)) { - int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + int frame_size = new_mtu + I40E_PACKET_HDR_PAD; if (frame_size > i40e_max_xdp_frame_size(vsi)) return -EINVAL; @@ -2965,7 +2965,7 @@ int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) { struct i40e_vsi_context ctxt; - i40e_status ret; + int ret; /* Don't modify stripping options if a port VLAN is active */ if (vsi->info.pvid) @@ -2985,8 +2985,8 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "update vlan stripping failed, err %s aq_err %s\n", - i40e_stat_str(&vsi->back->hw, ret), + "update vlan stripping failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&vsi->back->hw, vsi->back->hw.aq.asq_last_status)); } @@ -2999,7 +2999,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) { struct i40e_vsi_context ctxt; - i40e_status ret; + int ret; /* Don't modify stripping options if a port VLAN is active */ if (vsi->info.pvid) @@ -3020,8 +3020,8 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "update vlan stripping failed, err %s aq_err %s\n", - i40e_stat_str(&vsi->back->hw, ret), + "update vlan stripping failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&vsi->back->hw, vsi->back->hw.aq.asq_last_status)); } @@ -3252,7 +3252,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi) int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) { struct i40e_vsi_context ctxt; - i40e_status ret; + int ret; vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.pvid = cpu_to_le16(vid); @@ -3265,8 +3265,8 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "add pvid failed, err %s aq_err %s\n", - i40e_stat_str(&vsi->back->hw, ret), + "add pvid failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&vsi->back->hw, vsi->back->hw.aq.asq_last_status)); return -ENOENT; @@ -3429,8 +3429,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) u16 pf_q = vsi->base_queue + ring->queue_index; struct i40e_hw *hw = &vsi->back->hw; struct i40e_hmc_obj_txq tx_ctx; - i40e_status err = 0; u32 qtx_ctl = 0; + int err = 0; if (ring_is_xdp(ring)) ring->xsk_pool = i40e_xsk_pool(ring); @@ -3554,7 +3554,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) u16 pf_q = vsi->base_queue + ring->queue_index; struct i40e_hw *hw = &vsi->back->hw; struct i40e_hmc_obj_rxq rx_ctx; - i40e_status err = 0; + int err = 0; bool ok; int ret; @@ -5525,16 +5525,16 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - i40e_status ret; u32 tc_bw_max; + int ret; int i; /* Get the VSI level BW configuration */ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi bw config, err %s aq_err %s\n", - 
i40e_stat_str(&pf->hw, ret), + "couldn't get PF vsi bw config, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } @@ -5544,8 +5544,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) NULL); if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi ets bw config, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "couldn't get PF vsi ets bw config, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } @@ -5586,7 +5586,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, { struct i40e_aqc_configure_vsi_tc_bw_data bw_data; struct i40e_pf *pf = vsi->back; - i40e_status ret; + int ret; int i; /* There is no need to reset BW when mqprio mode is on. */ @@ -5734,8 +5734,8 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset) ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { - dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n", - i40e_stat_str(hw, ret), + dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; } @@ -5790,8 +5790,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) &bw_config, NULL); if (ret) { dev_info(&pf->pdev->dev, - "Failed querying vsi bw info, err %s aq_err %s\n", - i40e_stat_str(hw, ret), + "Failed querying vsi bw info, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); goto out; } @@ -5857,8 +5857,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "Update vsi tc config failed, err %s aq_err %s\n", - i40e_stat_str(hw, ret), + "Update vsi tc config failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); goto out; } @@ -5870,8 +5870,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ret = i40e_vsi_get_bw_info(vsi); if (ret) { dev_info(&pf->pdev->dev, - "Failed updating vsi bw info, err %s aq_err %s\n", - i40e_stat_str(hw, ret), + "Failed updating vsi bw info, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); goto out; } @@ -5962,8 +5962,8 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) I40E_MAX_BW_INACTIVE_ACCUM, NULL); if (ret) dev_err(&pf->pdev->dev, - "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n", - max_tx_rate, seid, i40e_stat_str(&pf->hw, ret), + "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n", + max_tx_rate, seid, ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; } @@ -6038,8 +6038,8 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi) last_aq_status = pf->hw.aq.asq_last_status; if (ret) dev_info(&pf->pdev->dev, - "Failed to delete cloud filter, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "Failed to delete cloud filter, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, last_aq_status)); kfree(cfilter); } @@ -6173,8 +6173,8 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size) ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); if (ret) { dev_info(&pf->pdev->dev, - "Cannot set RSS lut, err %s aq_err %s\n", - i40e_stat_str(hw, ret), + "Cannot set RSS lut, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); kfree(lut); return ret; @@ -6272,8 +6272,8 @@ static int 
i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid, ret = i40e_aq_add_vsi(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "add new vsi failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "add new vsi failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -ENOENT; @@ -6304,7 +6304,7 @@ static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch, u8 *bw_share) { struct i40e_aqc_configure_vsi_tc_bw_data bw_data; - i40e_status ret; + int ret; int i; memset(&bw_data, 0, sizeof(bw_data)); @@ -6340,9 +6340,9 @@ static int i40e_channel_config_tx_ring(struct i40e_pf *pf, struct i40e_vsi *vsi, struct i40e_channel *ch) { - i40e_status ret; - int i; u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; + int ret; + int i; /* Enable ETS TCs with equal BW Share for now across all VSIs */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { @@ -6518,8 +6518,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi) mode, NULL); if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH) dev_err(&pf->pdev->dev, - "couldn't set switch config bits, err %s aq_err %s\n", - i40e_stat_str(hw, ret), + "couldn't set switch config bits, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); @@ -6719,8 +6719,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, - "VEB bw config failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "VEB bw config failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto out; } @@ -6729,8 +6729,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) ret = i40e_veb_get_bw_info(veb); if (ret) { dev_info(&pf->pdev->dev, - "Failed getting veb bw config, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "Failed getting veb bw config, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } @@ -6813,8 +6813,8 @@ static int i40e_resume_port_tx(struct i40e_pf *pf) ret = i40e_aq_resume_port_tx(hw, NULL); if (ret) { dev_info(&pf->pdev->dev, - "Resume Port Tx failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "Resume Port Tx failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, pf->state); @@ -6838,8 +6838,8 @@ static int i40e_suspend_port_tx(struct i40e_pf *pf) ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL); if (ret) { dev_info(&pf->pdev->dev, - "Suspend Port Tx failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "Suspend Port Tx failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, pf->state); @@ -6878,8 +6878,8 @@ static int i40e_hw_set_dcb_config(struct i40e_pf *pf, ret = i40e_set_dcb_config(&pf->hw); if (ret) { dev_info(&pf->pdev->dev, - "Set DCB Config failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "Set DCB Config failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto out; } @@ -6995,8 +6995,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) i40e_aqc_opc_modify_switching_comp_ets, NULL); if (ret) { dev_info(&pf->pdev->dev, - "Modify Port ETS failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "Modify Port ETS failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, 
pf->hw.aq.asq_last_status)); goto out; } @@ -7033,8 +7033,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) ret = i40e_aq_dcb_updated(&pf->hw, NULL); if (ret) { dev_info(&pf->pdev->dev, - "DCB Updated failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "DCB Updated failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto out; } @@ -7117,8 +7117,8 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf) i40e_aqc_opc_enable_switching_comp_ets, NULL); if (err) { dev_info(&pf->pdev->dev, - "Enable Port ETS failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, err), + "Enable Port ETS failed, err %pe aq_err %s\n", + ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); err = -ENOENT; goto out; @@ -7197,8 +7197,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) pf->flags |= I40E_FLAG_DISABLE_FW_LLDP; } else { dev_info(&pf->pdev->dev, - "Query for DCB configuration failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, err), + "Query for DCB configuration failed, err %pe aq_err %s\n", + ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } @@ -7416,15 +7416,15 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) * @pf: board private structure * @is_up: whether the link state should be forced up or down **/ -static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) +static int i40e_force_link_state(struct i40e_pf *pf, bool is_up) { struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config = {0}; bool non_zero_phy_type = is_up; struct i40e_hw *hw = &pf->hw; - i40e_status err; u64 mask; u8 speed; + int err; /* Card might've been put in an unstable state by other drivers * and applications, which causes incorrect speed values being @@ -7436,8 +7436,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) NULL); if (err) { dev_err(&pf->pdev->dev, - "failed to get phy cap., ret = %s last_status = %s\n", - i40e_stat_str(hw, err), + "failed to get phy cap., ret = %pe last_status = %s\n", + ERR_PTR(err), i40e_aq_str(hw, hw->aq.asq_last_status)); return err; } @@ -7448,8 +7448,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) NULL); if (err) { dev_err(&pf->pdev->dev, - "failed to get phy cap., ret = %s last_status = %s\n", - i40e_stat_str(hw, err), + "failed to get phy cap., ret = %pe last_status = %s\n", + ERR_PTR(err), i40e_aq_str(hw, hw->aq.asq_last_status)); return err; } @@ -7493,8 +7493,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) if (err) { dev_err(&pf->pdev->dev, - "set phy config ret = %s last_status = %s\n", - i40e_stat_str(&pf->hw, err), + "set phy config ret = %pe last_status = %s\n", + ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return err; } @@ -7657,11 +7657,11 @@ static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi) * This function deletes a mac filter on the channel VSI which serves as the * macvlan. Returns 0 on success. 
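
The filter helper defined just below reports two results: its return value and, via the aq_err out parameter, the raw firmware admin-queue code. A hedged caller sketch modeled on the i40e_fwd_del() hunk later in this file:

static void example_del_fwd_filter(struct i40e_pf *pf, struct i40e_hw *hw,
				   u16 seid, const u8 *macaddr)
{
	int aq_err = 0;
	int ret = i40e_del_macvlan_filter(hw, seid, macaddr, &aq_err);

	if (ret)
		dev_info(&pf->pdev->dev,
			 "Error deleting mac filter on macvlan err %pe, aq_err %s\n",
			 ERR_PTR(ret), i40e_aq_str(hw, aq_err));
}
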
**/ -static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid, - const u8 *macaddr, int *aq_err) +static int i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid, + const u8 *macaddr, int *aq_err) { struct i40e_aqc_remove_macvlan_element_data element; - i40e_status status; + int status; memset(&element, 0, sizeof(element)); ether_addr_copy(element.mac_addr, macaddr); @@ -7683,12 +7683,12 @@ static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid, * This function adds a mac filter on the channel VSI which serves as the * macvlan. Returns 0 on success. **/ -static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid, - const u8 *macaddr, int *aq_err) +static int i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid, + const u8 *macaddr, int *aq_err) { struct i40e_aqc_add_macvlan_element_data element; - i40e_status status; u16 cmd_flags = 0; + int status; ether_addr_copy(element.mac_addr, macaddr); element.vlan_tag = 0; @@ -7834,8 +7834,8 @@ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev, rx_ring->netdev = NULL; } dev_info(&pf->pdev->dev, - "Error adding mac filter on macvlan err %s, aq_err %s\n", - i40e_stat_str(hw, ret), + "Error adding mac filter on macvlan err %pe, aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(hw, aq_err)); netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n"); } @@ -7907,8 +7907,8 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt, ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "Update vsi tc config failed, err %s aq_err %s\n", - i40e_stat_str(hw, ret), + "Update vsi tc config failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; } @@ -8123,8 +8123,8 @@ static void i40e_fwd_del(struct net_device *netdev, void *vdev) ch->fwd = NULL; } else { dev_info(&pf->pdev->dev, - "Error deleting mac filter on macvlan err %s, aq_err %s\n", - i40e_stat_str(hw, ret), + "Error deleting mac filter on macvlan err %pe, aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(hw, aq_err)); } break; @@ -8875,8 +8875,8 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi, kfree(filter); if (err) { dev_err(&pf->pdev->dev, - "Failed to delete cloud filter, err %s\n", - i40e_stat_str(&pf->hw, err)); + "Failed to delete cloud filter, err %pe\n", + ERR_PTR(err)); return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status); } @@ -9438,8 +9438,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, pf->flags &= ~I40E_FLAG_DCB_CAPABLE; } else { dev_info(&pf->pdev->dev, - "Failed querying DCB configuration data from firmware, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "Failed querying DCB configuration data from firmware, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } @@ -9887,8 +9887,8 @@ static void i40e_link_event(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; u8 new_link_speed, old_link_speed; - i40e_status status; bool new_link, old_link; + int status; #ifdef CONFIG_I40E_DCB int err; #endif /* CONFIG_I40E_DCB */ @@ -10099,9 +10099,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) struct i40e_arq_event_info event; struct i40e_hw *hw = &pf->hw; u16 pending, i = 0; - i40e_status ret; u16 opcode; u32 oldval; + int ret; u32 val; /* Do not run clean AQ when PF reset fails */ @@ -10265,8 +10265,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); if (ret) { 
dev_info(&pf->pdev->dev, - "couldn't get PF vsi config, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "couldn't get PF vsi config, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return; } @@ -10277,8 +10277,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "update vsi switch failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "update vsi switch failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } @@ -10301,8 +10301,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi config, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "couldn't get PF vsi config, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return; } @@ -10313,8 +10313,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "update vsi switch failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "update vsi switch failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } @@ -10458,8 +10458,8 @@ static int i40e_get_capabilities(struct i40e_pf *pf, buf_len = data_size; } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) { dev_info(&pf->pdev->dev, - "capability discovery failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, err), + "capability discovery failed, err %pe aq_err %s\n", + ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -ENODEV; @@ -10580,7 +10580,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid) struct i40e_cloud_filter *cfilter; struct i40e_pf *pf = vsi->back; struct hlist_node *node; - i40e_status ret; + int ret; /* Add cloud filters back if they exist */ hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, @@ -10596,8 +10596,8 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid) if (ret) { dev_dbg(&pf->pdev->dev, - "Failed to rebuild cloud filter, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "Failed to rebuild cloud filter, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; @@ -10615,7 +10615,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid) static int i40e_rebuild_channels(struct i40e_vsi *vsi) { struct i40e_channel *ch, *ch_tmp; - i40e_status ret; + int ret; if (list_empty(&vsi->ch_list)) return 0; @@ -10691,7 +10691,7 @@ static void i40e_clean_xps_state(struct i40e_vsi *vsi) static void i40e_prep_for_reset(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; - i40e_status ret = 0; + int ret = 0; u32 v; clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state); @@ -10796,7 +10796,7 @@ static void i40e_get_oem_version(struct i40e_hw *hw) static int i40e_reset(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; - i40e_status ret; + int ret; ret = i40e_pf_reset(hw); if (ret) { @@ -10821,7 +10821,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf); struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; - i40e_status ret; + int ret; u32 val; int v; @@ -10837,8 +10837,8 @@ static void i40e_rebuild(struct i40e_pf 
*pf, bool reinit, bool lock_acquired) /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ ret = i40e_init_adminq(&pf->hw); if (ret) { - dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto clear_recovery; } @@ -10949,8 +10949,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) I40E_AQ_EVENT_MEDIA_NA | I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (ret) - dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Rebuild the VSIs and VEBs that existed before reset. @@ -11053,8 +11053,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) msleep(75); ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (ret) - dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } @@ -11082,9 +11082,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) ret = i40e_set_promiscuous(pf, pf->cur_promisc); if (ret) dev_warn(&pf->pdev->dev, - "Failed to restore promiscuous setting: %s, err %s aq_err %s\n", + "Failed to restore promiscuous setting: %s, err %pe aq_err %s\n", pf->cur_promisc ? "on" : "off", - i40e_stat_str(&pf->hw, ret), + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); i40e_reset_all_vfs(pf, true); @@ -12218,8 +12218,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, (struct i40e_aqc_get_set_rss_key_data *)seed); if (ret) { dev_info(&pf->pdev->dev, - "Cannot get RSS key, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "Cannot get RSS key, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; @@ -12232,8 +12232,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); if (ret) { dev_info(&pf->pdev->dev, - "Cannot get RSS lut, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "Cannot get RSS lut, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; @@ -12508,11 +12508,11 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition * @pf: board private structure **/ -i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf) +int i40e_get_partition_bw_setting(struct i40e_pf *pf) { - i40e_status status; bool min_valid, max_valid; u32 max_bw, min_bw; + int status; status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, &min_valid, &max_valid); @@ -12531,10 +12531,10 @@ i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf) * i40e_set_partition_bw_setting - Set BW settings for this PF partition * @pf: board private structure **/ -i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf) +int i40e_set_partition_bw_setting(struct i40e_pf *pf) { struct i40e_aqc_configure_partition_bw_data bw_data; - i40e_status status; + int status; memset(&bw_data, 0, sizeof(bw_data)); @@ -12553,12 +12553,12 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf 
*pf) * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition * @pf: board private structure **/ -i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf) +int i40e_commit_partition_bw_setting(struct i40e_pf *pf) { /* Commit temporary BW setting to permanent NVM image */ enum i40e_admin_queue_err last_aq_status; - i40e_status ret; u16 nvm_word; + int ret; if (pf->hw.partition_id != 1) { dev_info(&pf->pdev->dev, @@ -12573,8 +12573,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf) last_aq_status = pf->hw.aq.asq_last_status; if (ret) { dev_info(&pf->pdev->dev, - "Cannot acquire NVM for read access, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "Cannot acquire NVM for read access, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } @@ -12590,8 +12590,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf) last_aq_status = pf->hw.aq.asq_last_status; i40e_release_nvm(&pf->hw); if (ret) { - dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } @@ -12604,8 +12604,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf) last_aq_status = pf->hw.aq.asq_last_status; if (ret) { dev_info(&pf->pdev->dev, - "Cannot acquire NVM for write access, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "Cannot acquire NVM for write access, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } @@ -12624,8 +12624,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf) i40e_release_nvm(&pf->hw); if (ret) dev_info(&pf->pdev->dev, - "BW settings NOT SAVED, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "BW settings NOT SAVED, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, last_aq_status)); bw_commit_out: @@ -12646,7 +12646,7 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf) #define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1 #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0) #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4 - i40e_status read_status = I40E_SUCCESS; + int read_status = I40E_SUCCESS; u16 sr_emp_sr_settings_ptr = 0; u16 features_enable = 0; u16 link_behavior = 0; @@ -12679,8 +12679,8 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf) err_nvm: dev_warn(&pf->pdev->dev, - "total-port-shutdown feature is off due to read nvm error: %s\n", - i40e_stat_str(&pf->hw, read_status)); + "total-port-shutdown feature is off due to read nvm error: %pe\n", + ERR_PTR(read_status)); return ret; } @@ -13025,7 +13025,7 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; u8 type, filter_index; - i40e_status ret; + int ret; type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? 
I40E_AQC_TUNNEL_TYPE_VXLAN : I40E_AQC_TUNNEL_TYPE_NGE; @@ -13033,8 +13033,8 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev, ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index, NULL); if (ret) { - netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n", - i40e_stat_str(hw, ret), + netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return -EIO; } @@ -13049,12 +13049,12 @@ static int i40e_udp_tunnel_unset_port(struct net_device *netdev, { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; - i40e_status ret; + int ret; ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL); if (ret) { - netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n", - i40e_stat_str(hw, ret), + netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return -EIO; } @@ -13167,6 +13167,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, } br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { __u16 mode; @@ -13339,9 +13341,11 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, old_prog = xchg(&vsi->xdp_prog, prog); if (need_reset) { - if (!prog) + if (!prog) { + xdp_features_clear_redirect_target(vsi->netdev); /* Wait until ndo_xsk_wakeup completes. */ synchronize_rcu(); + } i40e_reset_and_rebuild(pf, true, true); } @@ -13362,11 +13366,13 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, /* Kick start the NAPI context if there is an AF_XDP socket open * on that queue id. This so that receiving will start. */ - if (need_reset && prog) + if (need_reset && prog) { for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->xdp_rings[i]->xsk_pool) (void)i40e_xsk_wakeup(vsi->netdev, i, XDP_WAKEUP_RX); + xdp_features_set_redirect_target(vsi->netdev, true); + } return 0; } @@ -13801,6 +13807,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_add_mac_filter(vsi, mac_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); + + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY; } else { /* Relate the VSI_VMDQ name to the VSI_MAIN name. 
Note that we * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to @@ -13941,8 +13951,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.flags = I40E_AQ_VSI_TYPE_PF; if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi config, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "couldn't get PF vsi config, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -ENOENT; @@ -13971,8 +13981,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "update vsi failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "update vsi failed, err %d aq_err %s\n", + ret, i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); ret = -ENOENT; @@ -13991,8 +14001,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "update vsi failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "update vsi failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); ret = -ENOENT; @@ -14014,9 +14024,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) * message and continue */ dev_info(&pf->pdev->dev, - "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", + "failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n", enabled_tc, - i40e_stat_str(&pf->hw, ret), + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } @@ -14110,8 +14120,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_aq_add_vsi(hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "add vsi failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "add vsi failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); ret = -ENOENT; @@ -14142,8 +14152,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_vsi_get_bw_info(vsi); if (ret) { dev_info(&pf->pdev->dev, - "couldn't get vsi bw info, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "couldn't get vsi bw info, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* VSI is already added so not tearing that up */ ret = 0; @@ -14589,8 +14599,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb) &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, - "query veb bw config failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "query veb bw config failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); goto out; } @@ -14599,8 +14609,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb) &ets_data, NULL); if (ret) { dev_info(&pf->pdev->dev, - "query veb bw ets config failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "query veb bw ets config failed, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); goto out; } @@ -14796,8 +14806,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) /* get a VEB from the hardware */ if (ret) { dev_info(&pf->pdev->dev, - "couldn't add VEB, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "couldn't add VEB, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EPERM; } @@ -14807,16 +14817,16 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) &veb->stats_idx, NULL, NULL, NULL); if (ret) { dev_info(&pf->pdev->dev, - "couldn't get VEB statistics idx, err %s aq_err %s\n", - 
i40e_stat_str(&pf->hw, ret), + "couldn't get VEB statistics idx, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EPERM; } ret = i40e_veb_get_bw_info(veb); if (ret) { dev_info(&pf->pdev->dev, - "couldn't get VEB bw info, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "couldn't get VEB bw info, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); i40e_aq_delete_element(&pf->hw, veb->seid, NULL); return -ENOENT; @@ -15026,8 +15036,8 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) &next_seid, NULL); if (ret) { dev_info(&pf->pdev->dev, - "get switch config failed err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "get switch config failed err %d aq_err %s\n", + ret, i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); kfree(aq_buf); @@ -15072,8 +15082,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui ret = i40e_fetch_switch_configuration(pf, false); if (ret) { dev_info(&pf->pdev->dev, - "couldn't fetch switch config, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "couldn't fetch switch config, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; } @@ -15099,8 +15109,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui NULL); if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { dev_info(&pf->pdev->dev, - "couldn't set switch config bits, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), + "couldn't set switch config bits, err %pe aq_err %s\n", + ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* not a fatal problem, just keep going */ @@ -15437,13 +15447,12 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf) * * Return 0 on success, negative on failure. 
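
The reset helper whose body follows bounds its retries with a jiffies deadline. The same idiom in isolation (the back-off interval is an assumption, since the hunk below elides the loop body):

static int example_loop_reset(struct i40e_hw *hw)
{
	const unsigned long deadline = jiffies + 10 * HZ;	/* 10 s budget */
	int ret = i40e_pf_reset(hw);

	while (ret && time_before(jiffies, deadline)) {
		usleep_range(10000, 20000);	/* assumed back-off */
		ret = i40e_pf_reset(hw);
	}
	return ret;
}
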
**/ -static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf) +static int i40e_pf_loop_reset(struct i40e_pf *pf) { /* wait max 10 seconds for PF reset to succeed */ const unsigned long time_end = jiffies + 10 * HZ; - struct i40e_hw *hw = &pf->hw; - i40e_status ret; + int ret; ret = i40e_pf_reset(hw); while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) { @@ -15489,9 +15498,9 @@ static bool i40e_check_fw_empr(struct i40e_pf *pf) * Return 0 if NIC is healthy or negative value when there are issues * with resets **/ -static i40e_status i40e_handle_resets(struct i40e_pf *pf) +static int i40e_handle_resets(struct i40e_pf *pf) { - const i40e_status pfr = i40e_pf_loop_reset(pf); + const int pfr = i40e_pf_loop_reset(pf); const bool is_empr = i40e_check_fw_empr(pf); if (is_empr || pfr != I40E_SUCCESS) @@ -15589,7 +15598,6 @@ err_switch_setup: timer_shutdown_sync(&pf->service_timer); i40e_shutdown_adminq(hw); iounmap(hw->hw_addr); - pci_disable_pcie_error_reporting(pf->pdev); pci_release_mem_regions(pf->pdev); pci_disable_device(pf->pdev); kfree(pf); @@ -15629,13 +15637,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct i40e_aq_get_phy_abilities_resp abilities; #ifdef CONFIG_I40E_DCB enum i40e_get_fw_lldp_status_resp lldp_status; - i40e_status status; #endif /* CONFIG_I40E_DCB */ struct i40e_pf *pf; struct i40e_hw *hw; static u16 pfs_found; u16 wol_nvm_bits; u16 link_status; +#ifdef CONFIG_I40E_DCB + int status; +#endif /* CONFIG_I40E_DCB */ int err; u32 val; u32 i; @@ -15660,7 +15670,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_pci_reg; } - pci_enable_pcie_error_reporting(pdev); pci_set_master(pdev); /* Now that we have a PCI connection, we need to do the @@ -16004,8 +16013,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) I40E_AQ_EVENT_MEDIA_NA | I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (err) - dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, err), + dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n", + ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Reconfigure hardware for allowing smaller MSS in the case @@ -16023,8 +16032,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) msleep(75); err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (err) - dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, err), + dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n", + ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } @@ -16156,8 +16165,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* get the requested speeds from the fw */ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); if (err) - dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n", - i40e_stat_str(&pf->hw, err), + dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n", + ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); pf->hw.phy.link_info.requested_speeds = abilities.link_speed; @@ -16167,8 +16176,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* get the supported phy types from the fw */ err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL); if (err) - dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n", - i40e_stat_str(&pf->hw, err), + 
i40e_stat_str(&pf->hw, err), +
dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n", + ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* make sure the MFS hasn't been set lower than the default */ @@ -16218,7 +16227,6 @@ err_pf_reset: err_ioremap: kfree(pf); err_pf_alloc: - pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: @@ -16239,7 +16247,7 @@ static void i40e_remove(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; - i40e_status ret_code; + int ret_code; int i; i40e_dbg_pf_exit(pf); @@ -16366,7 +16374,6 @@ unmap: kfree(pf); pci_release_mem_regions(pdev); - pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); } @@ -16487,9 +16494,9 @@ static void i40e_pci_error_resume(struct pci_dev *pdev) static void i40e_enable_mc_magic_wake(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; - i40e_status ret; u8 mac_addr[6]; u16 flags = 0; + int ret; /* Get current MAC address in case it's an LAA */ if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 3a38bf8bcde7..9da0c87f0328 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -13,10 +13,10 @@ * in this file) as an equivalent of the FLASH part mapped into the SR. * We are accessing FLASH always thru the Shadow RAM. **/ -i40e_status i40e_init_nvm(struct i40e_hw *hw) +int i40e_init_nvm(struct i40e_hw *hw) { struct i40e_nvm_info *nvm = &hw->nvm; - i40e_status ret_code = 0; + int ret_code = 0; u32 fla, gens; u8 sr_size; @@ -52,12 +52,12 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw) * This function will request NVM ownership for reading * via the proper Admin Command. **/ -i40e_status i40e_acquire_nvm(struct i40e_hw *hw, - enum i40e_aq_resource_access_type access) +int i40e_acquire_nvm(struct i40e_hw *hw, + enum i40e_aq_resource_access_type access) { - i40e_status ret_code = 0; u64 gtime, timeout; u64 time_left = 0; + int ret_code = 0; if (hw->nvm.blank_nvm_mode) goto i40e_i40e_acquire_nvm_exit; @@ -111,7 +111,7 @@ i40e_i40e_acquire_nvm_exit: **/ void i40e_release_nvm(struct i40e_hw *hw) { - i40e_status ret_code = I40E_SUCCESS; + int ret_code = I40E_SUCCESS; u32 total_delay = 0; if (hw->nvm.blank_nvm_mode) @@ -138,9 +138,9 @@ void i40e_release_nvm(struct i40e_hw *hw) * * Polls the SRCTL Shadow RAM register done bit. **/ -static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) +static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) { - i40e_status ret_code = I40E_ERR_TIMEOUT; + int ret_code = I40E_ERR_TIMEOUT; u32 srctl, wait_cnt; /* Poll the I40E_GLNVM_SRCTL until the done bit is set */ @@ -165,10 +165,10 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) * * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. **/ -static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, - u16 *data) +static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, + u16 *data) { - i40e_status ret_code = I40E_ERR_TIMEOUT; + int ret_code = I40E_ERR_TIMEOUT; u32 sr_reg; if (offset >= hw->nvm.sr_size) { @@ -216,13 +216,13 @@ read_nvm_exit: * * Writes a 16 bit words buffer to the Shadow RAM using the admin command. 
**/ -static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, - u8 module_pointer, u32 offset, - u16 words, void *data, - bool last_command) +static int i40e_read_nvm_aq(struct i40e_hw *hw, + u8 module_pointer, u32 offset, + u16 words, void *data, + bool last_command) { - i40e_status ret_code = I40E_ERR_NVM; struct i40e_asq_cmd_details cmd_details; + int ret_code = I40E_ERR_NVM; memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; @@ -264,10 +264,10 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, * * Reads one 16 bit word from the Shadow RAM using the AdminQ **/ -static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, - u16 *data) +static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, + u16 *data) { - i40e_status ret_code = I40E_ERR_TIMEOUT; + int ret_code = I40E_ERR_TIMEOUT; ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true); *data = le16_to_cpu(*(__le16 *)data); @@ -286,8 +286,8 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, * Do not use this function except in cases where the nvm lock is already * taken via i40e_acquire_nvm(). **/ -static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw, - u16 offset, u16 *data) +static int __i40e_read_nvm_word(struct i40e_hw *hw, + u16 offset, u16 *data) { if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) return i40e_read_nvm_word_aq(hw, offset, data); @@ -303,10 +303,10 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw, * * Reads one 16 bit word from the Shadow RAM. **/ -i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, - u16 *data) +int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data) { - i40e_status ret_code = 0; + int ret_code = 0; if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); @@ -330,17 +330,17 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, * @words_data_size: Words to read from NVM * @data_ptr: Pointer to memory location where resulting buffer will be stored **/ -enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw, - u8 module_ptr, - u16 module_offset, - u16 data_offset, - u16 words_data_size, - u16 *data_ptr) +int i40e_read_nvm_module_data(struct i40e_hw *hw, + u8 module_ptr, + u16 module_offset, + u16 data_offset, + u16 words_data_size, + u16 *data_ptr) { - i40e_status status; u16 specific_ptr = 0; u16 ptr_value = 0; u32 offset = 0; + int status; if (module_ptr != 0) { status = i40e_read_nvm_word(hw, module_ptr, &ptr_value); @@ -406,10 +406,10 @@ enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw, * method. The buffer read is preceded by the NVM ownership take * and followed by the release. **/ -static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data) +static int i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) { - i40e_status ret_code = 0; + int ret_code = 0; u16 index, word; /* Loop thru the selected region */ @@ -437,13 +437,13 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, * method. The buffer read is preceded by the NVM ownership take * and followed by the release. 
**/ -static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data) +static int i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) { - i40e_status ret_code; - u16 read_size; bool last_cmd = false; u16 words_read = 0; + u16 read_size; + int ret_code; u16 i = 0; do { @@ -493,9 +493,9 @@ read_nvm_buffer_aq_exit: * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() * method. **/ -static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw, - u16 offset, u16 *words, - u16 *data) +static int __i40e_read_nvm_buffer(struct i40e_hw *hw, + u16 offset, u16 *words, + u16 *data) { if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) return i40e_read_nvm_buffer_aq(hw, offset, words, data); @@ -514,10 +514,10 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw, * method. The buffer read is preceded by the NVM ownership take * and followed by the release. **/ -i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data) +int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) { - i40e_status ret_code = 0; + int ret_code = 0; if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); @@ -544,12 +544,12 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, * * Writes a 16 bit words buffer to the Shadow RAM using the admin command. **/ -static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, - u32 offset, u16 words, void *data, - bool last_command) +static int i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 words, void *data, + bool last_command) { - i40e_status ret_code = I40E_ERR_NVM; struct i40e_asq_cmd_details cmd_details; + int ret_code = I40E_ERR_NVM; memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; @@ -594,14 +594,14 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, * is customer specific and unknown. Therefore, this function skips all maximum * possible size of VPD (1kB). **/ -static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, - u16 *checksum) +static int i40e_calc_nvm_checksum(struct i40e_hw *hw, + u16 *checksum) { - i40e_status ret_code; struct i40e_virt_mem vmem; u16 pcie_alt_module = 0; u16 checksum_local = 0; u16 vpd_module = 0; + int ret_code; u16 *data; u16 i = 0; @@ -675,11 +675,11 @@ i40e_calc_nvm_checksum_exit: * on ARQ completion event reception by caller. * This function will commit SR to NVM. **/ -i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw) +int i40e_update_nvm_checksum(struct i40e_hw *hw) { - i40e_status ret_code; - u16 checksum; __le16 le_sum; + int ret_code; + u16 checksum; ret_code = i40e_calc_nvm_checksum(hw, &checksum); if (!ret_code) { @@ -699,12 +699,12 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw) * Performs checksum calculation and validates the NVM SW checksum. If the * caller does not need checksum, the value can be NULL. **/ -i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, - u16 *checksum) +int i40e_validate_nvm_checksum(struct i40e_hw *hw, + u16 *checksum) { - i40e_status ret_code = 0; - u16 checksum_sr = 0; u16 checksum_local = 0; + u16 checksum_sr = 0; + int ret_code = 0; /* We must acquire the NVM lock in order to correctly synchronize the * NVM accesses across multiple PFs. 
Without doing so it is possible @@ -733,36 +733,36 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, return ret_code; } -static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *errno); +static int i40e_nvmupd_state_init(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +static int i40e_nvmupd_state_reading(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +static int i40e_nvmupd_state_writing(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno); static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, int *perrno); -static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - int *perrno); -static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); +static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *perrno); +static int i40e_nvmupd_nvm_write(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +static int i40e_nvmupd_nvm_read(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +static int i40e_nvmupd_exec_aq(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); static inline u8 i40e_nvmupd_get_module(u32 val) { return (u8)(val & I40E_NVM_MOD_PNT_MASK); @@ -807,12 +807,12 @@ static const char * const i40e_nvm_update_state_str[] = { * * Dispatches command depending on what update state is current **/ -i40e_status i40e_nvmupd_command(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) +int i40e_nvmupd_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { - i40e_status status; enum i40e_nvmupd_cmd upd_cmd; + int status; /* assume success */ *perrno = 0; @@ -923,12 +923,12 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, * Process legitimate commands of the Init state and conditionally set next * state. Reject all other commands. 
**/ -static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) +static int i40e_nvmupd_state_init(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { - i40e_status status = 0; enum i40e_nvmupd_cmd upd_cmd; + int status = 0; upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); @@ -1062,12 +1062,12 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, * NVM ownership is already held. Process legitimate commands and set any * change in state; reject all other commands. **/ -static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) +static int i40e_nvmupd_state_reading(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { - i40e_status status = 0; enum i40e_nvmupd_cmd upd_cmd; + int status = 0; upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); @@ -1104,13 +1104,13 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, * NVM ownership is already held. Process legitimate commands and set any * change in state; reject all other commands **/ -static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) +static int i40e_nvmupd_state_writing(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { - i40e_status status = 0; enum i40e_nvmupd_cmd upd_cmd; bool retry_attempt = false; + int status = 0; upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); @@ -1187,8 +1187,8 @@ retry: */ if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) && !retry_attempt) { - i40e_status old_status = status; u32 old_asq_status = hw->aq.asq_last_status; + int old_status = status; u32 gtime; gtime = rd32(hw, I40E_GLVFGEN_TIMER); @@ -1370,17 +1370,17 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, * * cmd structure contains identifiers and data buffer **/ -static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) +static int i40e_nvmupd_exec_aq(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { struct i40e_asq_cmd_details cmd_details; - i40e_status status; struct i40e_aq_desc *aq_desc; u32 buff_size = 0; u8 *buff = NULL; u32 aq_desc_len; u32 aq_data_len; + int status; i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); if (cmd->offset == 0xffff) @@ -1429,8 +1429,8 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, buff_size, &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_exec_aq err %s aq_err %s\n", - i40e_stat_str(hw, status), + "%s err %pe aq_err %s\n", + __func__, ERR_PTR(status), i40e_aq_str(hw, hw->aq.asq_last_status)); *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); return status; @@ -1454,9 +1454,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, * * cmd structure contains identifiers and data buffer **/ -static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) +static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { u32 aq_total_len; u32 aq_desc_len; @@ -1523,9 +1523,9 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, * * cmd structure contains identifiers and data buffer **/ -static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int 
*perrno) +static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { u32 aq_total_len; u32 aq_desc_len; @@ -1557,13 +1557,13 @@ static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw, * * cmd structure contains identifiers and data buffer **/ -static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) +static int i40e_nvmupd_nvm_read(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { struct i40e_asq_cmd_details cmd_details; - i40e_status status; u8 module, transaction; + int status; bool last; transaction = i40e_nvmupd_get_transaction(cmd->config); @@ -1596,13 +1596,13 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, * * module, offset, data_size and data are in cmd structure **/ -static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - int *perrno) +static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *perrno) { - i40e_status status = 0; struct i40e_asq_cmd_details cmd_details; u8 module, transaction; + int status = 0; bool last; transaction = i40e_nvmupd_get_transaction(cmd->config); @@ -1636,14 +1636,14 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, * * module, offset, data_size and data are in cmd structure **/ -static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) +static int i40e_nvmupd_nvm_write(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { - i40e_status status = 0; struct i40e_asq_cmd_details cmd_details; u8 module, transaction; u8 preservation_flags; + int status = 0; bool last; transaction = i40e_nvmupd_get_transaction(cmd->config); diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index 2f6815b2f8df..2bd4de03dafa 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -56,5 +56,4 @@ do { \ (h)->bus.func, ##__VA_ARGS__); \ } while (0) -typedef enum i40e_status_code i40e_status; #endif /* _I40E_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 9a71121420c3..fe845987d99a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -16,29 +16,29 @@ */ /* adminq functions */ -i40e_status i40e_init_adminq(struct i40e_hw *hw); +int i40e_init_adminq(struct i40e_hw *hw); void i40e_shutdown_adminq(struct i40e_hw *hw); void i40e_adminq_init_ring_data(struct i40e_hw *hw); -i40e_status i40e_clean_arq_element(struct i40e_hw *hw, - struct i40e_arq_event_info *e, - u16 *events_pending); -i40e_status +int i40e_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *events_pending); +int i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details); -i40e_status +int i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details, enum i40e_admin_queue_err *aq_status); -i40e_status +int i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details, bool is_atomic_context); -i40e_status +int 
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc, void *buff, /* can be NULL */ @@ -53,327 +53,332 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void i40e_idle_aq(struct i40e_hw *hw); bool i40e_check_asq_alive(struct i40e_hw *hw); -i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); +int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); -const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err); -i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, - bool pf_lut, u8 *lut, u16 lut_size); -i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, - bool pf_lut, u8 *lut, u16 lut_size); -i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_get_set_rss_key_data *key); -i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_get_set_rss_key_data *key); +int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +int i40e_aq_get_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); +int i40e_aq_set_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); u32 i40e_led_get(struct i40e_hw *hw); void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); -i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, - u16 led_addr, u32 mode); -i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, - u16 *val); -i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, - u32 time, u32 interval); +int i40e_led_set_phy(struct i40e_hw *hw, bool on, + u16 led_addr, u32 mode); +int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, + u16 *val); +int i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval); /* admin send queue commands */ -i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, - u16 *fw_major_version, u16 *fw_minor_version, - u32 *fw_build, - u16 *api_major_version, u16 *api_minor_version, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, - u32 reg_addr, u64 reg_val, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, +int i40e_aq_get_firmware_version(struct i40e_hw *hw, + u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, + u16 *api_major_version, u16 *api_minor_version, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_debug_write_register(struct i40e_hw *hw, + u32 reg_addr, u64 reg_val, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_debug_read_register(struct i40e_hw *hw, u32 reg_addr, u64 *reg_val, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id, - struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, - bool qualified_modules, bool report_init, - struct i40e_aq_get_phy_abilities_resp *abilities, - struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, - struct i40e_aq_set_phy_config *config, - struct 
i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, - bool atomic_reset); -i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw, - bool ena_lpbk, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, - bool enable_link, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, - bool enable_lse, struct i40e_link_status *link, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw, - u64 advt_reg, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, +int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_get_phy_capabilities(struct i40e_hw *hw, + bool qualified_modules, bool report_init, + struct i40e_aq_get_phy_abilities_resp *abilities, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_phy_config(struct i40e_hw *hw, + struct i40e_aq_set_phy_config *config, + struct i40e_asq_cmd_details *cmd_details); +int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, + bool atomic_reset); +int i40e_aq_set_mac_loopback(struct i40e_hw *hw, + bool ena_lpbk, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_clear_pxe_mode(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_link_restart_an(struct i40e_hw *hw, + bool enable_link, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_get_link_info(struct i40e_hw *hw, + bool enable_lse, struct i40e_link_status *link, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_local_advt_reg(struct i40e_hw *hw, + u64 advt_reg, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_send_driver_version(struct i40e_hw *hw, struct i40e_driver_version *dv, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, - struct i40e_vsi_context *vsi_ctx, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, - u16 vsi_id, bool set_filter, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, - u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details, - bool rx_only_promisc); -i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, - u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, - u16 seid, bool enable, - u16 vid, - struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, - u16 seid, bool enable, - u16 vid, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, - u16 seid, bool enable, u16 vid, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, - u16 
seid, bool enable, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, - struct i40e_vsi_context *vsi_ctx, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, - struct i40e_vsi_context *vsi_ctx, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, - u16 downlink_seid, u8 enabled_tc, - bool default_port, u16 *pveb_seid, - bool enable_stats, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, - u16 veb_seid, u16 *switch_id, bool *floating, - u16 *statistic_index, u16 *vebs_used, - u16 *vebs_free, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id, +int i40e_aq_add_vsi(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, + u16 vsi_id, bool set_filter, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, + struct i40e_asq_cmd_details *cmd_details, + bool rx_only_promisc); +int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, + u16 vid, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, + u16 vid, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, u16 vid, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, + u16 seid, bool enable, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_get_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_update_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, + u16 downlink_seid, u8 enabled_tc, + bool default_port, u16 *pveb_seid, + bool enable_stats, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_get_veb_parameters(struct i40e_hw *hw, + u16 veb_seid, u16 *switch_id, bool *floating, + u16 *statistic_index, u16 *vebs_used, + u16 *vebs_free, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_add_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details); -i40e_status +int i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid, struct i40e_aqc_add_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details, enum i40e_admin_queue_err *aq_status); -i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id, - struct i40e_aqc_remove_macvlan_element_data *mv_list, - u16 count, struct i40e_asq_cmd_details *cmd_details); -i40e_status +int i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id, + struct i40e_aqc_remove_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid, struct i40e_aqc_remove_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details, enum i40e_admin_queue_err *aq_status); -i40e_status i40e_aq_add_mirrorrule(struct 
i40e_hw *hw, u16 sw_seid, - u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, - struct i40e_asq_cmd_details *cmd_details, - u16 *rule_id, u16 *rules_used, u16 *rules_free); -i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, - u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, - struct i40e_asq_cmd_details *cmd_details, - u16 *rules_used, u16 *rules_free); +int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free); +int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rules_used, u16 *rules_free); -i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, - u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, - struct i40e_aqc_get_switch_config_resp *buf, - u16 buf_size, u16 *start_seid, - struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, - u16 flags, - u16 valid_flags, u8 mode, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_request_resource(struct i40e_hw *hw, - enum i40e_aq_resources_ids resource, - enum i40e_aq_resource_access_type access, - u8 sdp_number, u64 *timeout, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_release_resource(struct i40e_hw *hw, - enum i40e_aq_resources_ids resource, - u8 sdp_number, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, - u32 offset, u16 length, void *data, - bool last_command, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, - u32 offset, u16 length, bool last_command, +int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, + u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_get_switch_config(struct i40e_hw *hw, + struct i40e_aqc_get_switch_config_resp *buf, + u16 buf_size, u16 *start_seid, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, - void *buff, u16 buff_size, u16 *data_size, - enum i40e_admin_queue_opc list_type_opc, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, - u32 offset, u16 length, void *data, - bool last_command, u8 preservation_flags, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, - u8 rearrange_nvm, +int i40e_aq_set_switch_config(struct i40e_hw *hw, + u16 flags, + u16 valid_flags, u8 mode, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_request_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + enum i40e_aq_resource_access_type access, + u8 sdp_number, u64 *timeout, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_release_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + u8 sdp_number, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, bool last_command, + struct 
i40e_asq_cmd_details *cmd_details); +int i40e_aq_discover_capabilities(struct i40e_hw *hw, + void *buff, u16 buff_size, u16 *data_size, + enum i40e_admin_queue_opc list_type_opc, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, - u8 mib_type, void *buff, u16 buff_size, - u16 *local_len, u16 *remote_len, - struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code +int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, u8 preservation_flags, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_rearrange_nvm(struct i40e_hw *hw, + u8 rearrange_nvm, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, + u8 mib_type, void *buff, u16 buff_size, + u16 *local_len, u16 *remote_len, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_lldp_mib(struct i40e_hw *hw, u8 mib_type, void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, - bool enable_update, - struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code +int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, + bool enable_update, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, - bool persist, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw, - bool dcb_enable, - struct i40e_asq_cmd_details - *cmd_details); -i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, +int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, + bool persist, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_dcb_parameters(struct i40e_hw *hw, + bool dcb_enable, + struct i40e_asq_cmd_details + *cmd_details); +int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, + void *buff, u16 buff_size, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, - void *buff, u16 buff_size, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, - u16 udp_port, u8 protocol_index, - u8 *filter_index, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw, - u16 flags, u8 *mac_addr, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, +int i40e_aq_add_udp_tunnel(struct i40e_hw *hw, + u16 udp_port, u8 protocol_index, + u8 *filter_index, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_mac_address_write(struct i40e_hw *hw, + u16 flags, u8 *mac_addr, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, u16 seid, u16 credit, u8 max_credit, struct i40e_asq_cmd_details 
*cmd_details); -i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, - u16 seid, u16 credit, u8 max_bw, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, - struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, +int i40e_aq_dcb_updated(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_configure_switching_comp_ets_data *ets_data, - enum i40e_admin_queue_opc opcode, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, +int i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, + u16 seid, u16 credit, u8 max_bw, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +int +i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_ets_data *ets_data, + enum i40e_admin_queue_opc opcode, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_query_vsi_bw_config_resp *bw_data, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_query_port_ets_config_resp *bw_data, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, - struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code +int i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +int +i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +int +i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +int +i40e_aq_query_port_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_port_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +int +i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_resume_port_tx(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct 
i40e_aqc_cloud_filters_element_bb *filters, u8 filter_count); -enum i40e_status_code +int i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi, struct i40e_aqc_cloud_filters_element_data *filters, u8 filter_count); -enum i40e_status_code +int i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi, struct i40e_aqc_cloud_filters_element_data *filters, u8 filter_count); -enum i40e_status_code +int i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, struct i40e_aqc_cloud_filters_element_bb *filters, u8 filter_count); -i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, - struct i40e_lldp_variables *lldp_cfg); -enum i40e_status_code +int i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg); +int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details); /* i40e_common */ -i40e_status i40e_init_shared_code(struct i40e_hw *hw); -i40e_status i40e_pf_reset(struct i40e_hw *hw); +int i40e_init_shared_code(struct i40e_hw *hw); +int i40e_pf_reset(struct i40e_hw *hw); void i40e_clear_hw(struct i40e_hw *hw); void i40e_clear_pxe_mode(struct i40e_hw *hw); -i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up); -i40e_status i40e_update_link_info(struct i40e_hw *hw); -i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); -i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, - u32 *max_bw, u32 *min_bw, bool *min_valid, - bool *max_valid); -i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, - struct i40e_aqc_configure_partition_bw_data *bw_data, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); -i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, - u32 pba_num_size); -i40e_status i40e_validate_mac_addr(u8 *mac_addr); +int i40e_get_link_status(struct i40e_hw *hw, bool *link_up); +int i40e_update_link_info(struct i40e_hw *hw); +int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +int i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, bool *min_valid, + bool *max_valid); +int +i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, + u32 pba_num_size); +int i40e_validate_mac_addr(u8 *mac_addr); void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable); /* prototype for functions used for NVM access */ -i40e_status i40e_init_nvm(struct i40e_hw *hw); -i40e_status i40e_acquire_nvm(struct i40e_hw *hw, - enum i40e_aq_resource_access_type access); +int i40e_init_nvm(struct i40e_hw *hw); +int i40e_acquire_nvm(struct i40e_hw *hw, + enum i40e_aq_resource_access_type access); void i40e_release_nvm(struct i40e_hw *hw); -i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, - u16 *data); -enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw, - u8 module_ptr, - u16 module_offset, - u16 data_offset, - u16 words_data_size, - u16 *data_ptr); -i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data); -i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw); -i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, - u16 *checksum); -i40e_status i40e_nvmupd_command(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *); +int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data); +int 
i40e_read_nvm_module_data(struct i40e_hw *hw, + u8 module_ptr, + u16 module_offset, + u16 data_offset, + u16 words_data_size, + u16 *data_ptr); +int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data); +int i40e_update_nvm_checksum(struct i40e_hw *hw); +int i40e_validate_nvm_checksum(struct i40e_hw *hw, + u16 *checksum); +int i40e_nvmupd_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno); void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, struct i40e_aq_desc *desc); void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw); void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); -i40e_status i40e_set_mac_type(struct i40e_hw *hw); +int i40e_set_mac_type(struct i40e_hw *hw); extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; @@ -422,41 +427,41 @@ i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed) /* i40e_common for VF drivers*/ void i40e_vf_parse_hw_config(struct i40e_hw *hw, struct virtchnl_vf_resource *msg); -i40e_status i40e_vf_reset(struct i40e_hw *hw); -i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, - enum virtchnl_ops v_opcode, - i40e_status v_retval, - u8 *msg, u16 msglen, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_set_filter_control(struct i40e_hw *hw, - struct i40e_filter_control_settings *settings); -i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, - u8 *mac_addr, u16 ethtype, u16 flags, - u16 vsi_seid, u16 queue, bool is_add, - struct i40e_control_filter_stats *stats, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, - u8 table_id, u32 start_index, u16 buff_size, - void *buff, u16 *ret_buff_size, - u8 *ret_next_table, u32 *ret_next_index, - struct i40e_asq_cmd_details *cmd_details); +int i40e_vf_reset(struct i40e_hw *hw); +int i40e_aq_send_msg_to_pf(struct i40e_hw *hw, + enum virtchnl_ops v_opcode, + int v_retval, + u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details); +int i40e_set_filter_control(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings); +int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, + u8 *mac_addr, u16 ethtype, u16 flags, + u16 vsi_seid, u16 queue, bool is_add, + struct i40e_control_filter_stats *stats, + struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, + u8 table_id, u32 start_index, u16 buff_size, + void *buff, u16 *ret_buff_size, + u8 *ret_next_table, u32 *ret_next_index, + struct i40e_asq_cmd_details *cmd_details); void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 vsi_seid); -i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details); u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr); -i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details); void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); -enum i40e_status_code +int i40e_aq_set_phy_register_ext(struct i40e_hw *hw, u8 phy_select, u8 dev_addr, bool page_change, bool set_mdio, u8 mdio_num, u32 reg_addr, u32 reg_val, struct i40e_asq_cmd_details *cmd_details); 
-enum i40e_status_code +int i40e_aq_get_phy_register_ext(struct i40e_hw *hw, u8 phy_select, u8 dev_addr, bool page_change, bool set_mdio, u8 mdio_num, @@ -469,43 +474,43 @@ i40e_aq_get_phy_register_ext(struct i40e_hw *hw, #define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \ i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd) -i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, - u16 reg, u8 phy_addr, u16 *value); -i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, - u16 reg, u8 phy_addr, u16 value); -i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, - u8 page, u16 reg, u8 phy_addr, u16 *value); -i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, - u8 page, u16 reg, u8 phy_addr, u16 value); -i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, - u8 phy_addr, u16 *value); -i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, - u8 phy_addr, u16 value); +int i40e_read_phy_register_clause22(struct i40e_hw *hw, + u16 reg, u8 phy_addr, u16 *value); +int i40e_write_phy_register_clause22(struct i40e_hw *hw, + u16 reg, u8 phy_addr, u16 value); +int i40e_read_phy_register_clause45(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, u16 *value); +int i40e_write_phy_register_clause45(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, u16 value); +int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, + u8 phy_addr, u16 *value); +int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, + u8 phy_addr, u16 value); u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); -i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, - u32 time, u32 interval); -i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, - u16 buff_size, u32 track_id, - u32 *error_offset, u32 *error_info, - struct i40e_asq_cmd_details * - cmd_details); -i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, - u16 buff_size, u8 flags, - struct i40e_asq_cmd_details * - cmd_details); +int i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval); +int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, + u16 buff_size, u32 track_id, + u32 *error_offset, u32 *error_info, + struct i40e_asq_cmd_details * + cmd_details); +int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, + u16 buff_size, u8 flags, + struct i40e_asq_cmd_details * + cmd_details); struct i40e_generic_seg_header * i40e_find_segment_in_package(u32 segment_type, struct i40e_package_header *pkg_header); struct i40e_profile_section_header * i40e_find_section_in_profile(u32 section_type, struct i40e_profile_segment *profile); -enum i40e_status_code +int i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, u32 track_id); -enum i40e_status_code +int i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, u32 track_id); -enum i40e_status_code +int i40e_add_pinfo_to_list(struct i40e_hw *hw, struct i40e_profile_segment *profile, u8 *profile_info_sec, u32 track_id); diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h index db3714a65dc7..4d2782e76038 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_status.h +++ b/drivers/net/ethernet/intel/i40e/i40e_status.h @@ -9,65 +9,30 @@ enum i40e_status_code { I40E_SUCCESS = 0, I40E_ERR_NVM = -1, I40E_ERR_NVM_CHECKSUM = -2, - I40E_ERR_PHY = -3, I40E_ERR_CONFIG = -4, I40E_ERR_PARAM = -5, - I40E_ERR_MAC_TYPE = -6, I40E_ERR_UNKNOWN_PHY = -7, - I40E_ERR_LINK_SETUP = -8, - 
I40E_ERR_ADAPTER_STOPPED = -9, I40E_ERR_INVALID_MAC_ADDR = -10, I40E_ERR_DEVICE_NOT_SUPPORTED = -11, - I40E_ERR_PRIMARY_REQUESTS_PENDING = -12, - I40E_ERR_INVALID_LINK_SETTINGS = -13, - I40E_ERR_AUTONEG_NOT_COMPLETE = -14, I40E_ERR_RESET_FAILED = -15, - I40E_ERR_SWFW_SYNC = -16, I40E_ERR_NO_AVAILABLE_VSI = -17, I40E_ERR_NO_MEMORY = -18, I40E_ERR_BAD_PTR = -19, - I40E_ERR_RING_FULL = -20, - I40E_ERR_INVALID_PD_ID = -21, - I40E_ERR_INVALID_QP_ID = -22, - I40E_ERR_INVALID_CQ_ID = -23, - I40E_ERR_INVALID_CEQ_ID = -24, - I40E_ERR_INVALID_AEQ_ID = -25, I40E_ERR_INVALID_SIZE = -26, - I40E_ERR_INVALID_ARP_INDEX = -27, - I40E_ERR_INVALID_FPM_FUNC_ID = -28, - I40E_ERR_QP_INVALID_MSG_SIZE = -29, - I40E_ERR_QP_TOOMANY_WRS_POSTED = -30, - I40E_ERR_INVALID_FRAG_COUNT = -31, I40E_ERR_QUEUE_EMPTY = -32, - I40E_ERR_INVALID_ALIGNMENT = -33, - I40E_ERR_FLUSHED_QUEUE = -34, - I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35, - I40E_ERR_INVALID_IMM_DATA_SIZE = -36, I40E_ERR_TIMEOUT = -37, - I40E_ERR_OPCODE_MISMATCH = -38, - I40E_ERR_CQP_COMPL_ERROR = -39, - I40E_ERR_INVALID_VF_ID = -40, - I40E_ERR_INVALID_HMCFN_ID = -41, - I40E_ERR_BACKING_PAGE_ERROR = -42, - I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43, - I40E_ERR_INVALID_PBLE_INDEX = -44, I40E_ERR_INVALID_SD_INDEX = -45, I40E_ERR_INVALID_PAGE_DESC_INDEX = -46, I40E_ERR_INVALID_SD_TYPE = -47, - I40E_ERR_MEMCPY_FAILED = -48, I40E_ERR_INVALID_HMC_OBJ_INDEX = -49, I40E_ERR_INVALID_HMC_OBJ_COUNT = -50, - I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51, - I40E_ERR_SRQ_ENABLED = -52, I40E_ERR_ADMIN_QUEUE_ERROR = -53, I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54, I40E_ERR_BUF_TOO_SHORT = -55, I40E_ERR_ADMIN_QUEUE_FULL = -56, I40E_ERR_ADMIN_QUEUE_NO_WORK = -57, - I40E_ERR_BAD_IWARP_CQE = -58, I40E_ERR_NVM_BLANK_MODE = -59, I40E_ERR_NOT_IMPLEMENTED = -60, - I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61, I40E_ERR_DIAG_TEST_FAILED = -62, I40E_ERR_NOT_READY = -63, I40E_NOT_SUPPORTED = -64, diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 635f93d60318..8a4587585acd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -17,7 +17,7 @@ **/ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, enum virtchnl_ops v_opcode, - i40e_status v_retval, u8 *msg, + int v_retval, u8 *msg, u16 msglen) { struct i40e_hw *hw = &pf->hw; @@ -441,14 +441,14 @@ irq_list_done: } /** - * i40e_release_iwarp_qvlist + * i40e_release_rdma_qvlist * @vf: pointer to the VF. 
* **/ -static void i40e_release_iwarp_qvlist(struct i40e_vf *vf) +static void i40e_release_rdma_qvlist(struct i40e_vf *vf) { struct i40e_pf *pf = vf->pf; - struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info; + struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info; u32 msix_vf; u32 i; @@ -457,7 +457,7 @@ static void i40e_release_iwarp_qvlist(struct i40e_vf *vf) msix_vf = pf->hw.func_caps.num_msix_vectors_vf; for (i = 0; i < qvlist_info->num_vectors; i++) { - struct virtchnl_iwarp_qv_info *qv_info; + struct virtchnl_rdma_qv_info *qv_info; u32 next_q_index, next_q_type; struct i40e_hw *hw = &pf->hw; u32 v_idx, reg_idx, reg; @@ -491,18 +491,19 @@ static void i40e_release_iwarp_qvlist(struct i40e_vf *vf) } /** - * i40e_config_iwarp_qvlist + * i40e_config_rdma_qvlist * @vf: pointer to the VF info * @qvlist_info: queue and vector list * * Return 0 on success or < 0 on error **/ -static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, - struct virtchnl_iwarp_qvlist_info *qvlist_info) +static int +i40e_config_rdma_qvlist(struct i40e_vf *vf, + struct virtchnl_rdma_qvlist_info *qvlist_info) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; - struct virtchnl_iwarp_qv_info *qv_info; + struct virtchnl_rdma_qv_info *qv_info; u32 v_idx, i, reg_idx, reg; u32 next_q_idx, next_q_type; u32 msix_vf; @@ -1246,13 +1247,13 @@ err: * @vl: List of VLANs - apply filter for given VLANs * @num_vlans: Number of elements in @vl **/ -static i40e_status +static int i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable, bool unicast_enable, s16 *vl, u16 num_vlans) { - i40e_status aq_ret, aq_tmp = 0; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; + int aq_ret, aq_tmp = 0; int i; /* No VLAN to set promisc on, set on VSI */ @@ -1264,9 +1265,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable, int aq_err = pf->hw.aq.asq_last_status; dev_err(&pf->pdev->dev, - "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", + "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n", vf->vf_id, - i40e_stat_str(&pf->hw, aq_ret), + ERR_PTR(aq_ret), i40e_aq_str(&pf->hw, aq_err)); return aq_ret; @@ -1280,9 +1281,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable, int aq_err = pf->hw.aq.asq_last_status; dev_err(&pf->pdev->dev, - "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n", + "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n", vf->vf_id, - i40e_stat_str(&pf->hw, aq_ret), + ERR_PTR(aq_ret), i40e_aq_str(&pf->hw, aq_err)); } @@ -1297,9 +1298,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable, int aq_err = pf->hw.aq.asq_last_status; dev_err(&pf->pdev->dev, - "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", + "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n", vf->vf_id, - i40e_stat_str(&pf->hw, aq_ret), + ERR_PTR(aq_ret), i40e_aq_str(&pf->hw, aq_err)); if (!aq_tmp) @@ -1313,9 +1314,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable, int aq_err = pf->hw.aq.asq_last_status; dev_err(&pf->pdev->dev, - "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n", + "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n", vf->vf_id, - i40e_stat_str(&pf->hw, aq_ret), + ERR_PTR(aq_ret), i40e_aq_str(&pf->hw, aq_err)); if (!aq_tmp) @@ -1339,13 +1340,13 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable, * Called from the VF to configure the promiscuous mode of * VF vsis 
and from the VF reset path to reset promiscuous mode. **/ -static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, - u16 vsi_id, - bool allmulti, - bool alluni) +static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, + u16 vsi_id, + bool allmulti, + bool alluni) { - i40e_status aq_ret = I40E_SUCCESS; struct i40e_pf *pf = vf->pf; + int aq_ret = I40E_SUCCESS; struct i40e_vsi *vsi; u16 num_vlans; s16 *vl; @@ -1955,7 +1956,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, struct i40e_pf *pf; struct i40e_hw *hw; int abs_vf_id; - i40e_status aq_ret; + int aq_ret; /* validate the request */ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) @@ -1987,7 +1988,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, **/ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, enum virtchnl_ops opcode, - i40e_status retval) + int retval) { return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0); } @@ -2091,9 +2092,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_vf_resource *vfres = NULL; struct i40e_pf *pf = vf->pf; - i40e_status aq_ret = 0; struct i40e_vsi *vsi; int num_vsis = 1; + int aq_ret = 0; size_t len = 0; int ret; @@ -2123,11 +2124,11 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; if (i40e_vf_client_capable(pf, vf->vf_id) && - (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) { - vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP; - set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); + (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) { + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA; + set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states); } else { - clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); + clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states); } if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { @@ -2221,9 +2222,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) struct virtchnl_promisc_info *info = (struct virtchnl_promisc_info *)msg; struct i40e_pf *pf = vf->pf; - i40e_status aq_ret = 0; bool allmulti = false; bool alluni = false; + int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; @@ -2308,10 +2309,10 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) struct virtchnl_queue_pair_info *qpi; u16 vsi_id, vsi_queue_id = 0; struct i40e_pf *pf = vf->pf; - i40e_status aq_ret = 0; int i, j = 0, idx = 0; struct i40e_vsi *vsi; u16 num_qps_all = 0; + int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; @@ -2458,8 +2459,8 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) struct virtchnl_irq_map_info *irqmap_info = (struct virtchnl_irq_map_info *)msg; struct virtchnl_vector_map *map; + int aq_ret = 0; u16 vsi_id; - i40e_status aq_ret = 0; int i; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { @@ -2574,7 +2575,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; struct i40e_pf *pf = vf->pf; - i40e_status aq_ret = 0; + int aq_ret = 0; int i; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { @@ -2632,7 +2633,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; struct i40e_pf *pf = vf->pf; - i40e_status aq_ret = 0; + int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = 
I40E_ERR_PARAM; @@ -2783,7 +2784,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_queue_select *)msg; struct i40e_pf *pf = vf->pf; struct i40e_eth_stats stats; - i40e_status aq_ret = 0; + int aq_ret = 0; struct i40e_vsi *vsi; memset(&stats, 0, sizeof(struct i40e_eth_stats)); @@ -2926,7 +2927,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_ether_addr_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - i40e_status ret = 0; + int ret = 0; int i; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || @@ -2998,7 +2999,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) bool was_unimac_deleted = false; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - i40e_status ret = 0; + int ret = 0; int i; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || @@ -3071,7 +3072,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_vlan_filter_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - i40e_status aq_ret = 0; + int aq_ret = 0; int i; if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && @@ -3142,7 +3143,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_vlan_filter_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - i40e_status aq_ret = 0; + int aq_ret = 0; int i; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || @@ -3187,21 +3188,21 @@ error_param: } /** - * i40e_vc_iwarp_msg + * i40e_vc_rdma_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * * called from the VF for the iwarp msgs **/ -static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { struct i40e_pf *pf = vf->pf; int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; - i40e_status aq_ret = 0; + int aq_ret = 0; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { + !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -3211,42 +3212,42 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA, aq_ret); } /** - * i40e_vc_iwarp_qvmap_msg + * i40e_vc_rdma_qvmap_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @config: config qvmap or release it * * called from the VF for the iwarp msgs **/ -static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) +static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) { - struct virtchnl_iwarp_qvlist_info *qvlist_info = - (struct virtchnl_iwarp_qvlist_info *)msg; - i40e_status aq_ret = 0; + struct virtchnl_rdma_qvlist_info *qvlist_info = + (struct virtchnl_rdma_qvlist_info *)msg; + int aq_ret = 0; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { + !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } if (config) { - if (i40e_config_iwarp_qvlist(vf, qvlist_info)) + if (i40e_config_rdma_qvlist(vf, qvlist_info)) aq_ret = I40E_ERR_PARAM; } else { - i40e_release_iwarp_qvlist(vf); + i40e_release_rdma_qvlist(vf); } error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, - config ? 
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : - VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, + config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP : + VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP, aq_ret); } @@ -3263,7 +3264,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) (struct virtchnl_rss_key *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - i40e_status aq_ret = 0; + int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || @@ -3293,7 +3294,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) (struct virtchnl_rss_lut *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - i40e_status aq_ret = 0; + int aq_ret = 0; u16 i; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || @@ -3328,7 +3329,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) { struct virtchnl_rss_hena *vrh = NULL; struct i40e_pf *pf = vf->pf; - i40e_status aq_ret = 0; + int aq_ret = 0; int len = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { @@ -3365,7 +3366,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) (struct virtchnl_rss_hena *)msg; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; - i40e_status aq_ret = 0; + int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; @@ -3389,8 +3390,8 @@ err: **/ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) { - i40e_status aq_ret = 0; struct i40e_vsi *vsi; + int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; @@ -3415,8 +3416,8 @@ err: **/ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) { - i40e_status aq_ret = 0; struct i40e_vsi *vsi; + int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; @@ -3615,8 +3616,8 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf) ret = i40e_add_del_cloud_filter(vsi, cfilter, false); if (ret) dev_err(&pf->pdev->dev, - "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", - vf->vf_id, i40e_stat_str(&pf->hw, ret), + "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n", + vf->vf_id, ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); @@ -3642,7 +3643,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; struct hlist_node *node; - i40e_status aq_ret = 0; + int aq_ret = 0; int i, ret; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { @@ -3718,8 +3719,8 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) ret = i40e_add_del_cloud_filter(vsi, &cfilter, false); if (ret) { dev_err(&pf->pdev->dev, - "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", - vf->vf_id, i40e_stat_str(&pf->hw, ret), + "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n", + vf->vf_id, ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto err; } @@ -3773,7 +3774,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) struct i40e_cloud_filter *cfilter = NULL; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - i40e_status aq_ret = 0; + int aq_ret = 0; int i, ret; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { @@ -3852,8 +3853,8 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) ret = i40e_add_del_cloud_filter(vsi, cfilter, true); if (ret) { dev_err(&pf->pdev->dev, - "VF %d: Failed to add cloud filter, err %s aq_err %s\n", - vf->vf_id, i40e_stat_str(&pf->hw, ret), + "VF %d: Failed to add cloud filter, err %pe 
aq_err %s\n", + vf->vf_id, ERR_PTR(ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto err_free; } @@ -3882,7 +3883,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; struct i40e_link_status *ls = &pf->hw.phy.link_info; int i, adq_request_qps = 0; - i40e_status aq_ret = 0; + int aq_ret = 0; u64 speed = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { @@ -3994,7 +3995,7 @@ err: static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) { struct i40e_pf *pf = vf->pf; - i40e_status aq_ret = 0; + int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; @@ -4112,14 +4113,14 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, case VIRTCHNL_OP_GET_STATS: ret = i40e_vc_get_stats_msg(vf, msg); break; - case VIRTCHNL_OP_IWARP: - ret = i40e_vc_iwarp_msg(vf, msg, msglen); + case VIRTCHNL_OP_RDMA: + ret = i40e_vc_rdma_msg(vf, msg, msglen); break; - case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: - ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true); + case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: + ret = i40e_vc_rdma_qvmap_msg(vf, msg, true); break; - case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: - ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false); + case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP: + ret = i40e_vc_rdma_qvmap_msg(vf, msg, false); break; case VIRTCHNL_OP_CONFIG_RSS_KEY: ret = i40e_vc_config_rss_key(vf, msg); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 358bbdb58795..895b8feb2567 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -34,7 +34,7 @@ enum i40e_queue_ctrl { enum i40e_vf_states { I40E_VF_STATE_INIT = 0, I40E_VF_STATE_ACTIVE, - I40E_VF_STATE_IWARPENA, + I40E_VF_STATE_RDMAENA, I40E_VF_STATE_DISABLED, I40E_VF_STATE_MC_PROMISC, I40E_VF_STATE_UC_PROMISC, @@ -46,7 +46,7 @@ enum i40e_vf_states { enum i40e_vf_capabilities { I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0, I40E_VIRTCHNL_VF_CAP_L2, - I40E_VIRTCHNL_VF_CAP_IWARP, + I40E_VIRTCHNL_VF_CAP_RDMA, }; /* In ADq, max 4 VSI's can be allocated per VF including primary VF VSI. @@ -108,7 +108,7 @@ struct i40e_vf { u16 num_cloud_filters; /* RDMA Client */ - struct virtchnl_iwarp_qvlist_info *qvlist_info; + struct virtchnl_rdma_qvlist_info *qvlist_info; }; void i40e_free_vfs(struct i40e_pf *pf); diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 0d1bab4ac1b0..232bc61d9eee 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -30,6 +30,7 @@ #include <linux/jiffies.h> #include <net/ip6_checksum.h> #include <net/pkt_cls.h> +#include <net/pkt_sched.h> #include <net/udp.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> @@ -249,6 +250,7 @@ struct iavf_cloud_filter { /* board specific private data structure */ struct iavf_adapter { + struct workqueue_struct *wq; struct work_struct reset_task; struct work_struct adminq_task; struct delayed_work client_task; @@ -275,8 +277,8 @@ struct iavf_adapter { u64 hw_csum_rx_error; u32 rx_desc_count; int num_msix_vectors; - int num_iwarp_msix; - int iwarp_base_vector; + int num_rdma_msix; + int rdma_base_vector; u32 client_pending; struct iavf_client_instance *cinst; struct msix_entry *msix_entries; @@ -383,7 +385,7 @@ struct iavf_adapter { enum virtchnl_ops current_op; #define CLIENT_ALLOWED(_a) ((_a)->vf_res ? 
\ (_a)->vf_res->vf_cap_flags & \ - VIRTCHNL_VF_OFFLOAD_IWARP : \ + VIRTCHNL_VF_OFFLOAD_RDMA : \ 0) #define CLIENT_ENABLED(_a) ((_a)->cinst) /* RSS by the PF should be preferred over RSS via other methods. */ @@ -459,7 +461,6 @@ struct iavf_device { /* needed by iavf_ethtool.c */ extern char iavf_driver_name[]; -extern struct workqueue_struct *iavf_wq; static inline const char *iavf_state_str(enum iavf_state_t state) { diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c index 0c77e4171808..93c903c02c64 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_client.c +++ b/drivers/net/ethernet/intel/iavf/iavf_client.c @@ -127,7 +127,7 @@ void iavf_notify_client_open(struct iavf_vsi *vsi) } /** - * iavf_client_release_qvlist - send a message to the PF to release iwarp qv map + * iavf_client_release_qvlist - send a message to the PF to release rdma qv map * @ldev: pointer to L2 context. * * Return 0 on success or < 0 on error @@ -141,12 +141,12 @@ static int iavf_client_release_qvlist(struct iavf_info *ldev) return -EAGAIN; err = iavf_aq_send_msg_to_pf(&adapter->hw, - VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, + VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP, IAVF_SUCCESS, NULL, 0, NULL); if (err) dev_err(&adapter->pdev->dev, - "Unable to send iWarp vector release message to PF, error %d, aq status %d\n", + "Unable to send RDMA vector release message to PF, error %d, aq status %d\n", err, adapter->hw.aq.asq_last_status); return err; @@ -215,9 +215,9 @@ iavf_client_add_instance(struct iavf_adapter *adapter) cinst->lan_info.params = params; set_bit(__IAVF_CLIENT_INSTANCE_NONE, &cinst->state); - cinst->lan_info.msix_count = adapter->num_iwarp_msix; + cinst->lan_info.msix_count = adapter->num_rdma_msix; cinst->lan_info.msix_entries = - &adapter->msix_entries[adapter->iwarp_base_vector]; + &adapter->msix_entries[adapter->rdma_base_vector]; mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list, struct netdev_hw_addr, list); @@ -425,17 +425,17 @@ static u32 iavf_client_virtchnl_send(struct iavf_info *ldev, if (adapter->aq_required) return -EAGAIN; - err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP, + err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_RDMA, IAVF_SUCCESS, msg, len, NULL); if (err) - dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n", + dev_err(&adapter->pdev->dev, "Unable to send RDMA message to PF, error %d, aq status %d\n", err, adapter->hw.aq.asq_last_status); return err; } /** - * iavf_client_setup_qvlist - send a message to the PF to setup iwarp qv map + * iavf_client_setup_qvlist - send a message to the PF to setup rdma qv map * @ldev: pointer to L2 context. * @client: Client pointer. 
* @qvlist_info: queue and vector list @@ -446,7 +446,7 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev, struct iavf_client *client, struct iavf_qvlist_info *qvlist_info) { - struct virtchnl_iwarp_qvlist_info *v_qvlist_info; + struct virtchnl_rdma_qvlist_info *v_qvlist_info; struct iavf_adapter *adapter = ldev->vf; struct iavf_qv_info *qv_info; enum iavf_status err; @@ -463,23 +463,23 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev, continue; v_idx = qv_info->v_idx; if ((v_idx >= - (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) || - (v_idx < adapter->iwarp_base_vector)) + (adapter->rdma_base_vector + adapter->num_rdma_msix)) || + (v_idx < adapter->rdma_base_vector)) return -EINVAL; } - v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info; + v_qvlist_info = (struct virtchnl_rdma_qvlist_info *)qvlist_info; msg_size = struct_size(v_qvlist_info, qv_info, v_qvlist_info->num_vectors - 1); - adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP); + adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP); err = iavf_aq_send_msg_to_pf(&adapter->hw, - VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, IAVF_SUCCESS, + VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP, IAVF_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL); if (err) { dev_err(&adapter->pdev->dev, - "Unable to send iWarp vector config message to PF, error %d, aq status %d\n", + "Unable to send RDMA vector config message to PF, error %d, aq status %d\n", err, adapter->hw.aq.asq_last_status); goto out; } @@ -488,7 +488,7 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev, for (i = 0; i < 5; i++) { msleep(100); if (!(adapter->client_pending & - BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) { + BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP))) { err = 0; break; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.h b/drivers/net/ethernet/intel/iavf/iavf_client.h index 9a7cf39ea75a..c5d51d7dc7cc 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_client.h +++ b/drivers/net/ethernet/intel/iavf/iavf_client.h @@ -159,7 +159,7 @@ struct iavf_client { #define IAVF_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) #define IAVF_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) u8 type; -#define IAVF_CLIENT_IWARP 0 +#define IAVF_CLIENT_RDMA 0 struct iavf_client_ops *ops; /* client ops provided by the client */ }; diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c index 34e46a23894f..16c490965b61 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_common.c +++ b/drivers/net/ethernet/intel/iavf/iavf_common.c @@ -223,8 +223,8 @@ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err) return "IAVF_ERR_ADMIN_QUEUE_FULL"; case IAVF_ERR_ADMIN_QUEUE_NO_WORK: return "IAVF_ERR_ADMIN_QUEUE_NO_WORK"; - case IAVF_ERR_BAD_IWARP_CQE: - return "IAVF_ERR_BAD_IWARP_CQE"; + case IAVF_ERR_BAD_RDMA_CQE: + return "IAVF_ERR_BAD_RDMA_CQE"; case IAVF_ERR_NVM_BLANK_MODE: return "IAVF_ERR_NVM_BLANK_MODE"; case IAVF_ERR_NOT_IMPLEMENTED: diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index d79ead5e8d0c..6f171d1d85b7 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -532,7 +532,7 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags) if (changed_flags & IAVF_FLAG_LEGACY_RX) { if (netif_running(netdev)) { adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); } } @@ -672,7 +672,7 @@ static 
int iavf_set_ringparam(struct net_device *netdev, if (netif_running(netdev)) { adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); } return 0; @@ -1433,7 +1433,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; spin_unlock_bh(&adapter->fdir_fltr_lock); - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); ret: if (err && fltr) @@ -1474,7 +1474,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx spin_unlock_bh(&adapter->fdir_fltr_lock); if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST) - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); return err; } @@ -1658,7 +1658,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, spin_unlock_bh(&adapter->adv_rss_lock); if (!err) - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); mutex_unlock(&adapter->crit_lock); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index c4e451ef7942..3273aeb8fa67 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -49,7 +49,6 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver") MODULE_LICENSE("GPL v2"); static const struct net_device_ops iavf_netdev_ops; -struct workqueue_struct *iavf_wq; int iavf_status_to_errno(enum iavf_status status) { @@ -106,7 +105,7 @@ int iavf_status_to_errno(enum iavf_status status) case IAVF_ERR_SRQ_ENABLED: case IAVF_ERR_ADMIN_QUEUE_ERROR: case IAVF_ERR_ADMIN_QUEUE_FULL: - case IAVF_ERR_BAD_IWARP_CQE: + case IAVF_ERR_BAD_RDMA_CQE: case IAVF_ERR_NVM_BLANK_MODE: case IAVF_ERR_PE_DOORBELL_NOT_ENABLED: case IAVF_ERR_DIAG_TEST_FAILED: @@ -277,7 +276,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter) if (!(adapter->flags & (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) { adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); } } @@ -291,7 +290,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter) void iavf_schedule_request_stats(struct iavf_adapter *adapter) { adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS; - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } /** @@ -411,7 +410,7 @@ static irqreturn_t iavf_msix_aq(int irq, void *data) if (adapter->state != __IAVF_REMOVE) /* schedule work on the private workqueue */ - queue_work(iavf_wq, &adapter->adminq_task); + queue_work(adapter->wq, &adapter->adminq_task); return IRQ_HANDLED; } @@ -1034,7 +1033,7 @@ int iavf_replace_primary_mac(struct iavf_adapter *adapter, /* schedule the watchdog task to immediately process the request */ if (f) { - queue_work(iavf_wq, &adapter->watchdog_task.work); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); return 0; } return -ENOMEM; @@ -1257,7 +1256,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter) adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; if (CLIENT_ENABLED(adapter)) adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } /** @@ -1414,7 +1413,7 @@ void 
iavf_down(struct iavf_adapter *adapter) adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; } - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } /** @@ -2248,7 +2247,7 @@ iavf_set_vlan_offload_features(struct iavf_adapter *adapter, if (aq_required) { adapter->aq_required |= aq_required; - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } } @@ -2693,6 +2692,15 @@ static void iavf_watchdog_task(struct work_struct *work) goto restart_watchdog; } + if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) && + adapter->netdev_registered && + !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) && + rtnl_trylock()) { + netdev_update_features(adapter->netdev); + rtnl_unlock(); + adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; + } + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) iavf_change_state(adapter, __IAVF_COMM_FAILED); @@ -2700,7 +2708,7 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; mutex_unlock(&adapter->crit_lock); - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); return; } @@ -2708,31 +2716,31 @@ static void iavf_watchdog_task(struct work_struct *work) case __IAVF_STARTUP: iavf_startup(adapter); mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, &adapter->watchdog_task, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(30)); return; case __IAVF_INIT_VERSION_CHECK: iavf_init_version_check(adapter); mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, &adapter->watchdog_task, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(30)); return; case __IAVF_INIT_GET_RESOURCES: iavf_init_get_resources(adapter); mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, &adapter->watchdog_task, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; case __IAVF_INIT_EXTENDED_CAPS: iavf_init_process_extended_caps(adapter); mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, &adapter->watchdog_task, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; case __IAVF_INIT_CONFIG_ADAPTER: iavf_init_config_adapter(adapter); mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, &adapter->watchdog_task, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; case __IAVF_INIT_FAILED: @@ -2751,14 +2759,14 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; iavf_shutdown_adminq(hw); mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, (5 * HZ)); return; } /* Try again from failed step*/ iavf_change_state(adapter, adapter->last_state); mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ); + queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ); return; case __IAVF_COMM_FAILED: if (test_bit(__IAVF_IN_REMOVE_TASK, @@ -2789,13 +2797,14 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(10)); return; case __IAVF_RESETTING: mutex_unlock(&adapter->crit_lock); - 
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); + queue_delayed_work(adapter->wq, &adapter->watchdog_task, + HZ * 2); return; case __IAVF_DOWN: case __IAVF_DOWN_PENDING: @@ -2834,9 +2843,9 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ * 2); return; } @@ -2845,12 +2854,13 @@ static void iavf_watchdog_task(struct work_struct *work) mutex_unlock(&adapter->crit_lock); restart_watchdog: if (adapter->state >= __IAVF_DOWN) - queue_work(iavf_wq, &adapter->adminq_task); + queue_work(adapter->wq, &adapter->adminq_task); if (adapter->aq_required) - queue_delayed_work(iavf_wq, &adapter->watchdog_task, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(20)); else - queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); + queue_delayed_work(adapter->wq, &adapter->watchdog_task, + HZ * 2); } /** @@ -2952,7 +2962,7 @@ static void iavf_reset_task(struct work_struct *work) */ if (!mutex_trylock(&adapter->crit_lock)) { if (adapter->state != __IAVF_REMOVE) - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); goto reset_finish; } @@ -3116,7 +3126,7 @@ continue_reset: bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID); bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID); - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2); /* We were running when the reset started, so we need to restore some * state here. 
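The iavf hunks here replace the driver-global iavf_wq with a per-adapter queue: every queue_work()/queue_delayed_work()/mod_delayed_work() call now targets adapter->wq, which iavf_probe() allocates and iavf_remove() destroys (see the iavf_main.c hunks below). The old iavf_wq was a single-concurrency unbound queue shared by all functions, so the per-adapter ordered queue keeps the one-at-a-time execution guarantee while dropping the cross-device serialization. A minimal sketch of the ownership pattern follows, with hypothetical my_* names; alloc_ordered_workqueue(), queue_work() and destroy_workqueue() are the real APIs:

#include <linux/workqueue.h>

struct my_adapter {
	struct workqueue_struct *wq;	/* per-device, ordered */
	struct work_struct reset_task;
};

static void my_reset_task(struct work_struct *work)
{
	/* reset handling would run here, serialized per device */
}

static int my_probe(struct my_adapter *adapter, const char *name)
{
	/* WQ_MEM_RECLAIM mirrors the flag used for adapter->wq in
	 * iavf_probe(): the queue may have to make progress during
	 * memory reclaim.
	 */
	adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
	if (!adapter->wq)
		return -ENOMEM;

	INIT_WORK(&adapter->reset_task, my_reset_task);
	queue_work(adapter->wq, &adapter->reset_task);
	return 0;
}

static void my_remove(struct my_adapter *adapter)
{
	/* destroy_workqueue() flushes pending work first, so it must run
	 * before the adapter structure itself is freed.
	 */
	destroy_workqueue(adapter->wq);
}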
@@ -3208,7 +3218,7 @@ static void iavf_adminq_task(struct work_struct *work) if (adapter->state == __IAVF_REMOVE) return; - queue_work(iavf_wq, &adapter->adminq_task); + queue_work(adapter->wq, &adapter->adminq_task); goto out; } @@ -3232,24 +3242,6 @@ static void iavf_adminq_task(struct work_struct *work) } while (pending); mutex_unlock(&adapter->crit_lock); - if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) { - if (adapter->netdev_registered || - !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { - struct net_device *netdev = adapter->netdev; - - rtnl_lock(); - netdev_update_features(netdev); - rtnl_unlock(); - /* Request VLAN offload settings */ - if (VLAN_V2_ALLOWED(adapter)) - iavf_set_vlan_offload_features - (adapter, 0, netdev->features); - - iavf_set_queue_vlan_tag_loc(adapter); - } - - adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; - } if ((adapter->flags & (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || adapter->state == __IAVF_RESETTING) @@ -3850,7 +3842,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", - be32_to_cpu(match.mask->dst)); + be32_to_cpu(match.mask->src)); return -EINVAL; } } @@ -4349,7 +4341,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) if (netif_running(netdev)) { adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); } return 0; @@ -4876,8 +4868,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_pci_reg; } - pci_enable_pcie_error_reporting(pdev); - pci_set_master(pdev); netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter), @@ -4898,6 +4888,13 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw = &adapter->hw; hw->back = adapter; + adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, + iavf_driver_name); + if (!adapter->wq) { + err = -ENOMEM; + goto err_alloc_wq; + } + adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; iavf_change_state(adapter, __IAVF_STARTUP); @@ -4942,7 +4939,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->adminq_task, iavf_adminq_task); INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); - queue_delayed_work(iavf_wq, &adapter->watchdog_task, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(5 * (pdev->devfn & 0x07))); /* Setup the wait queue for indicating transition to down status */ @@ -4954,9 +4951,10 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; err_ioremap: + destroy_workqueue(adapter->wq); +err_alloc_wq: free_netdev(netdev); err_alloc_etherdev: - pci_disable_pcie_error_reporting(pdev); pci_release_regions(pdev); err_pci_reg: err_dma: @@ -5023,7 +5021,7 @@ static int __maybe_unused iavf_resume(struct device *dev_d) return err; } - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); netif_device_attach(adapter->netdev); @@ -5170,9 +5168,9 @@ static void iavf_remove(struct pci_dev *pdev) } spin_unlock_bh(&adapter->adv_rss_lock); - free_netdev(netdev); + destroy_workqueue(adapter->wq); - pci_disable_pcie_error_reporting(pdev); + free_netdev(netdev); pci_disable_device(pdev); } @@ -5196,24 +5194,11 @@ static struct pci_driver iavf_driver = { **/ static int __init 
iavf_init_module(void) { - int ret; - pr_info("iavf: %s\n", iavf_driver_string); pr_info("%s\n", iavf_copyright); - iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, - iavf_driver_name); - if (!iavf_wq) { - pr_err("%s: Failed to create workqueue\n", iavf_driver_name); - return -ENOMEM; - } - - ret = pci_register_driver(&iavf_driver); - if (ret) - destroy_workqueue(iavf_wq); - - return ret; + return pci_register_driver(&iavf_driver); } module_init(iavf_init_module); @@ -5227,7 +5212,6 @@ module_init(iavf_init_module); static void __exit iavf_exit_module(void) { pci_unregister_driver(&iavf_driver); - destroy_workqueue(iavf_wq); } module_exit(iavf_exit_module); diff --git a/drivers/net/ethernet/intel/iavf/iavf_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h index 2ea5c7c339bc..0e493ee9e9d1 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_status.h +++ b/drivers/net/ethernet/intel/iavf/iavf_status.h @@ -64,7 +64,7 @@ enum iavf_status { IAVF_ERR_BUF_TOO_SHORT = -55, IAVF_ERR_ADMIN_QUEUE_FULL = -56, IAVF_ERR_ADMIN_QUEUE_NO_WORK = -57, - IAVF_ERR_BAD_IWARP_CQE = -58, + IAVF_ERR_BAD_RDMA_CQE = -58, IAVF_ERR_NVM_BLANK_MODE = -59, IAVF_ERR_NOT_IMPLEMENTED = -60, IAVF_ERR_PE_DOORBELL_NOT_ENABLED = -61, diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 24a701fd140e..6d23338604bb 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1952,7 +1952,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { adapter->flags |= IAVF_FLAG_RESET_PENDING; dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); } break; default: @@ -2226,6 +2226,14 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, iavf_process_config(adapter); adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; + + /* Request VLAN offload settings */ + if (VLAN_V2_ALLOWED(adapter)) + iavf_set_vlan_offload_features(adapter, 0, + netdev->features); + + iavf_set_queue_vlan_tag_loc(adapter); + was_mac_changed = !ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr); @@ -2290,7 +2298,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (v_opcode != adapter->current_op) return; break; - case VIRTCHNL_OP_IWARP: + case VIRTCHNL_OP_RDMA: /* Gobble zero-length replies from the PF. They indicate that * a previous message was received OK, and the client doesn't * care about that. 
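The IWARP-to-RDMA renames that run through these iavf and i40e hunks are a source-level cleanup only: the virtchnl opcodes are part of the PF/VF wire ABI, so their numeric values cannot change. The usual pattern, and what the matching virtchnl.h change is expected to look like, is to make each new enumerator an alias of the old one; a sketch with assumed values (include/linux/avf/virtchnl.h is authoritative):

/* ABI-preserving rename: the new names alias the old values, so code
 * built against either name emits identical opcodes on the wire. The
 * values shown are illustrative.
 */
enum virtchnl_ops {
	/* ... earlier opcodes elided ... */
	VIRTCHNL_OP_IWARP = 20,			/* legacy name */
	VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,	/* preferred name */
	VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
	VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
	VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
	VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
	/* ... */
};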
@@ -2299,9 +2307,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, iavf_notify_client_message(&adapter->vsi, msg, msglen); break; - case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: adapter->client_pending &= - ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); + ~(BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP)); break; case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 9183d480b70b..5d89392f969b 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -28,6 +28,7 @@ ice-y := ice_main.o \ ice_flow.o \ ice_idc.o \ ice_devlink.o \ + ice_ddp.o \ ice_fw_update.o \ ice_lag.o \ ice_ethtool.o \ @@ -42,8 +43,8 @@ ice-$(CONFIG_PCI_IOV) += \ ice_vf_vsi_vlan_ops.o \ ice_vf_lib.o ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o -ice-$(CONFIG_TTY) += ice_gnss.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o +ice-$(CONFIG_GNSS) += ice_gnss.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 2f0b604abc5e..b0e29e342401 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -39,7 +39,9 @@ #include <linux/avf/virtchnl.h> #include <linux/cpu_rmap.h> #include <linux/dim.h> +#include <linux/gnss.h> #include <net/pkt_cls.h> +#include <net/pkt_sched.h> #include <net/tc_act/tc_mirred.h> #include <net/tc_act/tc_gact.h> #include <net/ip.h> @@ -121,6 +123,8 @@ #define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD) +#define ICE_MAX_TSO_SIZE 131072 + #define ICE_UP_TABLE_TRANSLATE(val, i) \ (((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \ ICE_AQ_VSI_UP_TABLE_UP##i##_M) @@ -352,7 +356,6 @@ struct ice_vsi { struct ice_vf *vf; /* VF associated with this VSI */ - u16 ethtype; /* Ethernet protocol for pause frame */ u16 num_gfltr; u16 num_bfltr; @@ -565,9 +568,8 @@ struct ice_pf { struct mutex adev_mutex; /* lock to protect aux device access */ u32 msg_enable; struct ice_ptp ptp; - struct tty_driver *ice_gnss_tty_driver; - struct tty_port *gnss_tty_port[ICE_GNSS_TTY_MINOR_DEVICES]; - struct gnss_serial *gnss_serial[ICE_GNSS_TTY_MINOR_DEVICES]; + struct gnss_serial *gnss_serial; + struct gnss_device *gnss_dev; u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */ u16 rdma_base_vector; @@ -880,7 +882,7 @@ void ice_set_ethtool_repr_ops(struct net_device *netdev); void ice_set_ethtool_safe_mode_ops(struct net_device *netdev); u16 ice_get_avail_txq_count(struct ice_pf *pf); u16 ice_get_avail_rxq_count(struct ice_pf *pf); -int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx); +int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked); void ice_update_vsi_stats(struct ice_vsi *vsi); void ice_update_pf_stats(struct ice_pf *pf); void @@ -889,7 +891,7 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, int ice_up(struct ice_vsi *vsi); int ice_down(struct ice_vsi *vsi); int ice_down_up(struct ice_vsi *vsi); -int ice_vsi_cfg(struct ice_vsi *vsi); +int ice_vsi_cfg_lan(struct ice_vsi *vsi); struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi); int ice_vsi_determine_xdp_res(struct ice_vsi *vsi); int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog); @@ -907,6 +909,7 @@ void 
ice_print_link_msg(struct ice_vsi *vsi, bool isup); int ice_plug_aux_dev(struct ice_pf *pf); void ice_unplug_aux_dev(struct ice_pf *pf); int ice_init_rdma(struct ice_pf *pf); +void ice_deinit_rdma(struct ice_pf *pf); const char *ice_aq_str(enum ice_aq_err aq_err); bool ice_is_wol_supported(struct ice_hw *hw); void ice_fdir_del_all_fltrs(struct ice_vsi *vsi); @@ -931,6 +934,8 @@ int ice_open(struct net_device *netdev); int ice_open_internal(struct net_device *netdev); int ice_stop(struct net_device *netdev); void ice_service_task_schedule(struct ice_pf *pf); +int ice_load(struct ice_pf *pf); +void ice_unload(struct ice_pf *pf); /** * ice_set_rdma_cap - enable RDMA support diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 958c1e435232..838d9b274d68 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1659,14 +1659,24 @@ struct ice_aqc_lldp_get_mib { #define ICE_AQ_LLDP_TX_ACTIVE 0 #define ICE_AQ_LLDP_TX_SUSPENDED 1 #define ICE_AQ_LLDP_TX_FLUSHED 3 +/* DCBX mode */ +#define ICE_AQ_LLDP_DCBX_M GENMASK(7, 6) +#define ICE_AQ_LLDP_DCBX_NA 0 +#define ICE_AQ_LLDP_DCBX_CEE 1 +#define ICE_AQ_LLDP_DCBX_IEEE 2 + + u8 state; +#define ICE_AQ_LLDP_MIB_CHANGE_STATE_M BIT(0) +#define ICE_AQ_LLDP_MIB_CHANGE_EXECUTED 0 +#define ICE_AQ_LLDP_MIB_CHANGE_PENDING 1 + /* The following bytes are reserved for the Get LLDP MIB command (0x0A00) * and in the LLDP MIB Change Event (0x0A01). They are valid for the * Get LLDP MIB (0x0A00) response only. */ - u8 reserved1; __le16 local_len; __le16 remote_len; - u8 reserved2[2]; + u8 reserved[2]; __le32 addr_high; __le32 addr_low; }; @@ -1677,6 +1687,9 @@ struct ice_aqc_lldp_set_mib_change { u8 command; #define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 #define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1 +#define ICE_AQ_LLDP_MIB_PENDING_M BIT(1) +#define ICE_AQ_LLDP_MIB_PENDING_DISABLE 0 +#define ICE_AQ_LLDP_MIB_PENDING_ENABLE 1 u8 reserved[15]; }; @@ -2329,6 +2342,7 @@ enum ice_adminq_opc { ice_aqc_opc_lldp_set_local_mib = 0x0A08, ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09, ice_aqc_opc_lldp_filter_ctrl = 0x0A0A, + ice_aqc_opc_lldp_execute_pending_mib = 0x0A0B, /* RSS commands */ ice_aqc_opc_set_rss_key = 0x0B02, diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 554095b25f44..1911d644dfa8 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -355,9 +355,6 @@ static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring) { if (ice_ring_uses_build_skb(rx_ring)) return ICE_SKB_PAD; - else if (ice_is_xdp_ena_vsi(rx_ring->vsi)) - return XDP_PACKET_HEADROOM; - return 0; } @@ -495,7 +492,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) { struct device *dev = ice_pf_to_dev(ring->vsi->back); - u16 num_bufs = ICE_DESC_UNUSED(ring); + u32 num_bufs = ICE_RX_DESC_UNUSED(ring); int err; ring->rx_buf_len = ring->vsi->rx_buf_len; @@ -503,8 +500,10 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) if (ring->vsi->type == ICE_VSI_PF) { if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) /* coverity[check_return] */ - xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, - ring->q_index, ring->q_vector->napi.napi_id); + __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, + ring->q_index, + ring->q_vector->napi.napi_id, + ring->vsi->rx_buf_len); ring->xsk_pool = ice_xsk_pool(ring); if (ring->xsk_pool) { @@ -524,9 +523,11 @@ int 
ice_vsi_cfg_rxq(struct ice_rx_ring *ring) } else { if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) /* coverity[check_return] */ - xdp_rxq_info_reg(&ring->xdp_rxq, - ring->netdev, - ring->q_index, ring->q_vector->napi.napi_id); + __xdp_rxq_info_reg(&ring->xdp_rxq, + ring->netdev, + ring->q_index, + ring->q_vector->napi.napi_id, + ring->vsi->rx_buf_len); err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, @@ -536,6 +537,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) } } + xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq); + ring->xdp.data = NULL; err = ice_setup_rx_ctx(ring); if (err) { dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n", diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index d02b55b6aa9c..c2fda4fa4188 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -208,6 +208,31 @@ bool ice_is_e810t(struct ice_hw *hw) } /** + * ice_is_e823 + * @hw: pointer to the hardware structure + * + * returns true if the device is E823-L or E823-C based, false if not. + */ +bool ice_is_e823(struct ice_hw *hw) +{ + switch (hw->device_id) { + case ICE_DEV_ID_E823L_BACKPLANE: + case ICE_DEV_ID_E823L_SFP: + case ICE_DEV_ID_E823L_10G_BASE_T: + case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823L_QSFP: + case ICE_DEV_ID_E823C_BACKPLANE: + case ICE_DEV_ID_E823C_QSFP: + case ICE_DEV_ID_E823C_SFP: + case ICE_DEV_ID_E823C_10G_BASE_T: + case ICE_DEV_ID_E823C_SGMII: + return true; + default: + return false; + } +} + +/** * ice_clear_pf_cfg - Clear PF configuration * @hw: pointer to the hardware structure * @@ -1088,8 +1113,10 @@ int ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_cqinit; - hw->port_info = devm_kzalloc(ice_hw_to_dev(hw), - sizeof(*hw->port_info), GFP_KERNEL); + if (!hw->port_info) + hw->port_info = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*hw->port_info), + GFP_KERNEL); if (!hw->port_info) { status = -ENOMEM; goto err_unroll_cqinit; @@ -1217,11 +1244,6 @@ void ice_deinit_hw(struct ice_hw *hw) ice_free_hw_tbls(hw); mutex_destroy(&hw->tnl_lock); - if (hw->port_info) { - devm_kfree(ice_hw_to_dev(hw), hw->port_info); - hw->port_info = NULL; - } - /* Attempt to disable FW logging before shutting down control queues */ ice_cfg_fw_log(hw, false); ice_destroy_all_ctrlq(hw); @@ -5504,6 +5526,19 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) } /** + * ice_lldp_execute_pending_mib - execute LLDP pending MIB request + * @hw: pointer to HW struct + */ +int ice_lldp_execute_pending_mib(struct ice_hw *hw) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** * ice_fw_supports_report_dflt_cfg * @hw: pointer to the hardware structure * @@ -5524,7 +5559,7 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw) * returned by the firmware is a 16 bit * value, but is indexed * by [fls(speed) - 1] */ -static const u32 ice_aq_to_link_speed[15] = { +static const u32 ice_aq_to_link_speed[] = { SPEED_10, /* BIT(0) */ SPEED_100, SPEED_1000, @@ -5536,10 +5571,6 @@ static const u32 ice_aq_to_link_speed[15] = { SPEED_40000, SPEED_50000, SPEED_100000, /* BIT(10) */ - 0, - 0, - 0, - 0 /* BIT(14) */ }; /** @@ -5550,5 +5581,8 @@ static const u32 ice_aq_to_link_speed[15] = { */ u32 ice_get_link_speed(u16 index) { + if (index >= ARRAY_SIZE(ice_aq_to_link_speed)) + return 0; + return ice_aq_to_link_speed[index]; } diff --git 
a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 4c6a0b5c9304..8ba5f935a092 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -122,7 +122,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update); int ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, - enum ice_fc_mode fc); + enum ice_fc_mode req_mode); bool ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps, struct ice_aqc_set_phy_cfg_data *cfg); @@ -199,6 +199,7 @@ void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); bool ice_is_e810t(struct ice_hw *hw); +bool ice_is_e823(struct ice_hw *hw); int ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); @@ -221,6 +222,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); int ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); +int ice_lldp_execute_pending_mib(struct ice_hw *hw); int ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, u16 bus_addr, __le16 addr, u8 params, u8 *data, diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 6be02f9b0b8c..c557dfc50aad 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -73,6 +73,9 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, if (!ena_update) cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS; + else + cmd->command |= FIELD_PREP(ICE_AQ_LLDP_MIB_PENDING_M, + ICE_AQ_LLDP_MIB_PENDING_ENABLE); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } @@ -566,7 +569,7 @@ ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) * @tlv: Organization specific TLV * @dcbcfg: Local store to update ETS REC data * - * Currently only IEEE 802.1Qaz TLV is supported, all others + * Currently IEEE 802.1Qaz and CEE DCBX TLV are supported, others * will be returned */ static void @@ -585,7 +588,7 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) ice_parse_cee_tlv(tlv, dcbcfg); break; default: - break; + break; /* Other OUIs not supported */ } } @@ -964,6 +967,42 @@ int ice_get_dcb_cfg(struct ice_port_info *pi) } /** + * ice_get_dcb_cfg_from_mib_change + * @pi: port information structure + * @event: pointer to the admin queue receive event + * + * Set DCB configuration from received MIB Change event + */ +void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi, + struct ice_rq_event_info *event) +{ + struct ice_dcbx_cfg *dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; + struct ice_aqc_lldp_get_mib *mib; + u8 change_type, dcbx_mode; + + mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw; + + change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type); + if (change_type == ICE_AQ_LLDP_MIB_REMOTE) + dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg; + + dcbx_mode = FIELD_GET(ICE_AQ_LLDP_DCBX_M, mib->type); + + switch (dcbx_mode) { + case ICE_AQ_LLDP_DCBX_IEEE: + dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE; + ice_lldp_to_dcb_cfg(event->msg_buf, dcbx_cfg); + break; + + case ICE_AQ_LLDP_DCBX_CEE: + pi->qos_cfg.desired_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg; + ice_cee_to_dcb_cfg((struct ice_aqc_get_cee_dcb_cfg_resp *) + event->msg_buf, pi); + break; + } +} + +/** * ice_init_dcb * @hw: pointer to the HW struct * @enable_mib_change: enable MIB 
change event diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h index 6abf28a14291..be34650a77d5 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb.h @@ -144,6 +144,8 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg); int ice_get_dcb_cfg(struct ice_port_info *pi); int ice_set_dcb_cfg(struct ice_port_info *pi); +void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi, + struct ice_rq_event_info *event); int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change); int ice_query_port_ets(struct ice_port_info *pi, diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 4f24d441c35e..c6d4926f0fcf 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -441,7 +441,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) goto out; } - ice_pf_dcb_recfg(pf); + ice_pf_dcb_recfg(pf, false); out: /* enable previously downed VSIs */ @@ -731,12 +731,13 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf) /** * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs * @pf: pointer to the PF struct + * @locked: is adev device lock held * * Assumed caller has already disabled all VSIs before * calling this function. Reconfiguring DCB based on * local_dcbx_cfg. */ -void ice_pf_dcb_recfg(struct ice_pf *pf) +void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) { struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; struct iidc_event *event; @@ -783,14 +784,16 @@ void ice_pf_dcb_recfg(struct ice_pf *pf) if (vsi->type == ICE_VSI_PF) ice_dcbnl_set_all(vsi); } - /* Notify the AUX drivers that TC change is finished */ - event = kzalloc(sizeof(*event), GFP_KERNEL); - if (!event) - return; + if (!locked) { + /* Notify the AUX drivers that TC change is finished */ + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + return; - set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type); - ice_send_event_to_aux(pf, event); - kfree(event); + set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type); + ice_send_event_to_aux(pf, event); + kfree(event); + } } /** @@ -859,7 +862,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) if (err) goto dcb_init_err; - return err; + return 0; dcb_init_err: dev_err(dev, "DCB init failed\n"); @@ -944,6 +947,16 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring, } /** + * ice_dcb_is_mib_change_pending - Check if MIB change is pending + * @state: MIB change state + */ +static bool ice_dcb_is_mib_change_pending(u8 state) +{ + return ICE_AQ_LLDP_MIB_CHANGE_PENDING == + FIELD_GET(ICE_AQ_LLDP_MIB_CHANGE_STATE_M, state); +} + +/** * ice_dcb_process_lldp_set_mib_change - Process MIB change * @pf: ptr to ice_pf * @event: pointer to the admin queue receive event @@ -956,6 +969,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct device *dev = ice_pf_to_dev(pf); struct ice_aqc_lldp_get_mib *mib; struct ice_dcbx_cfg tmp_dcbx_cfg; + bool pending_handled = true; bool need_reconfig = false; struct ice_port_info *pi; u8 mib_type; @@ -972,41 +986,58 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, pi = pf->hw.port_info; mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw; + /* Ignore if event is not for Nearest Bridge */ - mib_type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) & - ICE_AQ_LLDP_BRID_TYPE_M); + mib_type = FIELD_GET(ICE_AQ_LLDP_BRID_TYPE_M, mib->type); 
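/* Note on the FIELD_GET()/FIELD_PREP() conversions in this function: both
 * helpers come from <linux/bitfield.h> and take a mask (typically
 * GENMASK()-defined, as with ICE_AQ_LLDP_DCBX_M above). FIELD_GET(mask, val)
 * masks and right-shifts in one step, replacing the open-coded
 * shift-and-mask arithmetic deleted above; FIELD_PREP(mask, val) is the
 * inverse, which ice_aq_cfg_lldp_mib_change() uses to fold
 * ICE_AQ_LLDP_MIB_PENDING_ENABLE into the command word.
 */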
dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type); if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID) return; + /* A pending change event contains accurate config information, and + * the FW setting has not been updated yet, so detect if change is + * pending to determine where to pull config information from + * (FW vs event) + */ + if (ice_dcb_is_mib_change_pending(mib->state)) + pending_handled = false; + /* Check MIB Type and return if event for Remote MIB update */ - mib_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M; + mib_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type); dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? "remote" : "local"); if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) { /* Update the remote cached instance and return */ - ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE, - ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, - &pi->qos_cfg.remote_dcbx_cfg); - if (ret) { - dev_err(dev, "Failed to get remote DCB config\n"); - return; + if (!pending_handled) { + ice_get_dcb_cfg_from_mib_change(pi, event); + } else { + ret = + ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE, + ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, + &pi->qos_cfg.remote_dcbx_cfg); + if (ret) + dev_dbg(dev, "Failed to get remote DCB config\n"); } + return; } + /* That a DCB change has happened is now determined */ mutex_lock(&pf->tc_mutex); /* store the old configuration */ - tmp_dcbx_cfg = pf->hw.port_info->qos_cfg.local_dcbx_cfg; + tmp_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg; /* Reset the old DCBX configuration data */ memset(&pi->qos_cfg.local_dcbx_cfg, 0, sizeof(pi->qos_cfg.local_dcbx_cfg)); /* Get updated DCBX data from firmware */ - ret = ice_get_dcb_cfg(pf->hw.port_info); - if (ret) { - dev_err(dev, "Failed to get DCB config\n"); - goto out; + if (!pending_handled) { + ice_get_dcb_cfg_from_mib_change(pi, event); + } else { + ret = ice_get_dcb_cfg(pi); + if (ret) { + dev_err(dev, "Failed to get DCB config\n"); + goto out; + } } /* No change detected in DCBX configs */ @@ -1033,18 +1064,24 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, clear_bit(ICE_FLAG_DCB_ENA, pf->flags); } + /* Send Execute Pending MIB Change event if it is a Pending event */ + if (!pending_handled) { + ice_lldp_execute_pending_mib(&pf->hw); + pending_handled = true; + } + rtnl_lock(); /* disable VSIs affected by DCB changes */ ice_dcb_ena_dis_vsi(pf, false, true); - ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); + ret = ice_query_port_ets(pi, &buf, sizeof(buf), NULL); if (ret) { dev_err(dev, "Query Port ETS failed\n"); goto unlock_rtnl; } /* changes in configuration update VSI */ - ice_pf_dcb_recfg(pf); + ice_pf_dcb_recfg(pf, false); /* enable previously downed VSIs */ ice_dcb_ena_dis_vsi(pf, true, true); @@ -1052,4 +1089,8 @@ unlock_rtnl: rtnl_unlock(); out: mutex_unlock(&pf->tc_mutex); + + /* Send Execute Pending MIB Change event if it is a Pending event */ + if (!pending_handled) + ice_lldp_execute_pending_mib(&pf->hw); } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index 4c421c842a13..800879a88c5e 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -23,7 +23,7 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index); int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked); int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg); -void ice_pf_dcb_recfg(struct ice_pf *pf); +void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked); void
ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi); int ice_init_pf_dcb(struct ice_pf *pf, bool locked); void ice_update_dcb_stats(struct ice_pf *pf); @@ -128,7 +128,7 @@ static inline u8 ice_get_pfc_mode(struct ice_pf *pf) return 0; } -static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { } +static inline void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) { } static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { } static inline void ice_update_dcb_stats(struct ice_pf *pf) { } static inline void diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c new file mode 100644 index 000000000000..d71ed210f9c4 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -0,0 +1,1897 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022, Intel Corporation. */ + +#include "ice_common.h" +#include "ice.h" +#include "ice_ddp.h" + +/* For supporting double VLAN mode, it is necessary to enable or disable certain + * boost tcam entries. The metadata label names that match the following + * prefixes will be saved to allow enabling double VLAN mode. + */ +#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */ +#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */ + +/* To support tunneling entries by PF, the package will append the PF number to + * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. + */ +#define ICE_TNL_PRE "TNL_" +static const struct ice_tunnel_type_scan tnls[] = { + { TNL_VXLAN, "TNL_VXLAN_PF" }, + { TNL_GENEVE, "TNL_GENEVE_PF" }, + { TNL_LAST, "" } +}; + +/** + * ice_verify_pkg - verify package + * @pkg: pointer to the package buffer + * @len: size of the package buffer + * + * Verifies various attributes of the package file, including length, format + * version, and the requirement of at least one segment. + */ +enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) +{ + u32 seg_count; + u32 i; + + if (len < struct_size(pkg, seg_offset, 1)) + return ICE_DDP_PKG_INVALID_FILE; + + if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || + pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || + pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || + pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) + return ICE_DDP_PKG_INVALID_FILE; + + /* pkg must have at least one segment */ + seg_count = le32_to_cpu(pkg->seg_count); + if (seg_count < 1) + return ICE_DDP_PKG_INVALID_FILE; + + /* make sure segment array fits in package length */ + if (len < struct_size(pkg, seg_offset, seg_count)) + return ICE_DDP_PKG_INVALID_FILE; + + /* all segments must fit within length */ + for (i = 0; i < seg_count; i++) { + u32 off = le32_to_cpu(pkg->seg_offset[i]); + struct ice_generic_seg_hdr *seg; + + /* segment header must fit */ + if (len < off + sizeof(*seg)) + return ICE_DDP_PKG_INVALID_FILE; + + seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); + + /* segment body must fit */ + if (len < off + le32_to_cpu(seg->seg_size)) + return ICE_DDP_PKG_INVALID_FILE; + } + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_free_seg - free package segment pointer + * @hw: pointer to the hardware structure + * + * Frees the package segment pointer in the proper manner, depending on whether + * the segment was allocated or just the passed-in pointer was stored. 
+ */ +void ice_free_seg(struct ice_hw *hw) +{ + if (hw->pkg_copy) { + devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy); + hw->pkg_copy = NULL; + hw->pkg_size = 0; + } + hw->seg = NULL; +} + +/** + * ice_chk_pkg_version - check package version for compatibility with driver + * @pkg_ver: pointer to a version structure to check + * + * Check to make sure that the package about to be downloaded is compatible with + * the driver. To be compatible, the major and minor components of the package + * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR + * definitions. + */ +static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) +{ + if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ || + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && + pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) + return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH; + else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ || + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && + pkg_ver->minor < ICE_PKG_SUPP_VER_MNR)) + return ICE_DDP_PKG_FILE_VERSION_TOO_LOW; + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_pkg_val_buf + * @buf: pointer to the ice buffer + * + * This helper function validates a buffer's header. + */ +struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) +{ + struct ice_buf_hdr *hdr; + u16 section_count; + u16 data_end; + + hdr = (struct ice_buf_hdr *)buf->buf; + /* verify data */ + section_count = le16_to_cpu(hdr->section_count); + if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) + return NULL; + + data_end = le16_to_cpu(hdr->data_end); + if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) + return NULL; + + return hdr; +} + +/** + * ice_find_buf_table + * @ice_seg: pointer to the ice segment + * + * Returns the address of the buffer table within the ice segment. + */ +static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) +{ + struct ice_nvm_table *nvms = (struct ice_nvm_table *) + (ice_seg->device_table + le32_to_cpu(ice_seg->device_table_count)); + + return (__force struct ice_buf_table *)(nvms->vers + + le32_to_cpu(nvms->table_count)); +} + +/** + * ice_pkg_enum_buf + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This function will enumerate all the buffers in the ice segment. The first + * call is made with the ice_seg parameter non-NULL; on subsequent calls, + * ice_seg is set to NULL which continues the enumeration. When the function + * returns a NULL pointer, then the end of the buffers has been reached, or an + * unexpected value has been detected (for example an invalid section count or + * an invalid buffer end value). + */ +static struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg, + struct ice_pkg_enum *state) +{ + if (ice_seg) { + state->buf_table = ice_find_buf_table(ice_seg); + if (!state->buf_table) + return NULL; + + state->buf_idx = 0; + return ice_pkg_val_buf(state->buf_table->buf_array); + } + + if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count)) + return ice_pkg_val_buf(state->buf_table->buf_array + + state->buf_idx); + else + return NULL; +} + +/** + * ice_pkg_advance_sect + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This helper function will advance the section within the ice segment, + * also advancing the buffer if needed. 
+ */ +static bool ice_pkg_advance_sect(struct ice_seg *ice_seg, + struct ice_pkg_enum *state) +{ + if (!ice_seg && !state->buf) + return false; + + if (!ice_seg && state->buf) + if (++state->sect_idx < le16_to_cpu(state->buf->section_count)) + return true; + + state->buf = ice_pkg_enum_buf(ice_seg, state); + if (!state->buf) + return false; + + /* start of new buffer, reset section index */ + state->sect_idx = 0; + return true; +} + +/** + * ice_pkg_enum_section + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * @sect_type: section type to enumerate + * + * This function will enumerate all the sections of a particular type in the + * ice segment. The first call is made with the ice_seg parameter non-NULL; + * on subsequent calls, ice_seg is set to NULL which continues the enumeration. + * When the function returns a NULL pointer, then the end of the matching + * sections has been reached. + */ +void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type) +{ + u16 offset, size; + + if (ice_seg) + state->type = sect_type; + + if (!ice_pkg_advance_sect(ice_seg, state)) + return NULL; + + /* scan for next matching section */ + while (state->buf->section_entry[state->sect_idx].type != + cpu_to_le32(state->type)) + if (!ice_pkg_advance_sect(NULL, state)) + return NULL; + + /* validate section */ + offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); + if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) + return NULL; + + size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size); + if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) + return NULL; + + /* make sure the section fits in the buffer */ + if (offset + size > ICE_PKG_BUF_SIZE) + return NULL; + + state->sect_type = + le32_to_cpu(state->buf->section_entry[state->sect_idx].type); + + /* calc pointer to this section */ + state->sect = + ((u8 *)state->buf) + + le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); + + return state->sect; +} + +/** + * ice_pkg_enum_entry + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * @sect_type: section type to enumerate + * @offset: pointer to variable that receives the offset in the table (optional) + * @handler: function that handles access to the entries in the section type + * + * This function will enumerate all the entries in a particular section type in + * the ice segment. The first call is made with the ice_seg parameter non-NULL; + * on subsequent calls, ice_seg is set to NULL which continues the enumeration. + * When the function returns a NULL pointer, then the end of the entries has + * been reached. + * + * Since each section may have a different header and entry size, the handler + * function is needed to determine the number and location of entries in each + * section. + * + * The offset parameter is optional, but should be used for sections that + * contain an offset for each section table. For such cases, the section handler + * function must return the appropriate offset + index to give the absolute + * offset for each entry. For example, if the base for a section's header + * indicates a base offset of 10, and the index for the entry is 2, then the + * section handler function should set the offset to 10 + 2 = 12. 
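+ * + * As an illustration, a sketch of the call pattern (mirroring ice_get_prof_index_max() below; the first call passes a non-NULL segment, every later call passes NULL to continue): + * + *	struct ice_pkg_enum state = {}; + *	struct ice_seg *seg = hw->seg; + *	struct ice_fv *fv; + *	u32 offset; + * + *	do { + *		fv = ice_pkg_enum_entry(seg, &state, ICE_SID_FLD_VEC_SW, + *					&offset, ice_sw_fv_handler); + *		seg = NULL; (pass NULL to continue the enumeration) + *	} while (fv); 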
+ */ +static void *ice_pkg_enum_entry(struct ice_seg *ice_seg, + struct ice_pkg_enum *state, u32 sect_type, + u32 *offset, + void *(*handler)(u32 sect_type, void *section, + u32 index, u32 *offset)) +{ + void *entry; + + if (ice_seg) { + if (!handler) + return NULL; + + if (!ice_pkg_enum_section(ice_seg, state, sect_type)) + return NULL; + + state->entry_idx = 0; + state->handler = handler; + } else { + state->entry_idx++; + } + + if (!state->handler) + return NULL; + + /* get entry */ + entry = state->handler(state->sect_type, state->sect, state->entry_idx, + offset); + if (!entry) { + /* end of a section, look for another section of this type */ + if (!ice_pkg_enum_section(NULL, state, 0)) + return NULL; + + state->entry_idx = 0; + entry = state->handler(state->sect_type, state->sect, + state->entry_idx, offset); + } + + return entry; +} + +/** + * ice_sw_fv_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the field vector entry to be returned + * @offset: ptr to variable that receives the offset in the field vector table + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * This function treats the given section as of type ice_sw_fv_section and + * enumerates the offset field. "offset" is an index into the field vector table. + */ +static void *ice_sw_fv_handler(u32 sect_type, void *section, u32 index, + u32 *offset) +{ + struct ice_sw_fv_section *fv_section = section; + + if (!section || sect_type != ICE_SID_FLD_VEC_SW) + return NULL; + if (index >= le16_to_cpu(fv_section->count)) + return NULL; + if (offset) + /* "index" passed in to this function is relative to a given + * 4k block. To get to the true index into the field vector + * table, add the relative index to the base_offset + * field of this section + */ + *offset = le16_to_cpu(fv_section->base_offset) + index; + return fv_section->fv + index; +} + +/** + * ice_get_prof_index_max - get the max profile index for used profiles + * @hw: pointer to the HW struct + * + * Calling this function will get the max profile index for the used profiles + * and store the index number in struct ice_switch_info *switch_info + * in HW for later use. + */ +static int ice_get_prof_index_max(struct ice_hw *hw) +{ + u16 prof_index = 0, j, max_prof_index = 0; + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + bool flag = false; + struct ice_fv *fv; + u32 offset; + + memset(&state, 0, sizeof(state)); + + if (!hw->seg) + return -EINVAL; + + ice_seg = hw->seg; + + do { + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + if (!fv) + break; + ice_seg = NULL; + + /* in a profile that is not used, the prot_id is set to 0xff + * and the off is set to 0x1ff for all the field vectors. 
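+ * (These are the ICE_PROT_INVALID and ICE_FV_OFFSET_INVAL + * sentinels tested in the loop below.) 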
+ */ + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv->ew[j].prot_id != ICE_PROT_INVALID || + fv->ew[j].off != ICE_FV_OFFSET_INVAL) + flag = true; + if (flag && prof_index > max_prof_index) + max_prof_index = prof_index; + + prof_index++; + flag = false; + } while (fv); + + hw->switch_info->max_used_prof_index = max_prof_index; + + return 0; +} + +/** + * ice_get_ddp_pkg_state - get DDP pkg state after download + * @hw: pointer to the HW struct + * @already_loaded: indicates if pkg was already loaded onto the device + */ +static enum ice_ddp_state ice_get_ddp_pkg_state(struct ice_hw *hw, + bool already_loaded) +{ + if (hw->pkg_ver.major == hw->active_pkg_ver.major && + hw->pkg_ver.minor == hw->active_pkg_ver.minor && + hw->pkg_ver.update == hw->active_pkg_ver.update && + hw->pkg_ver.draft == hw->active_pkg_ver.draft && + !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) { + if (already_loaded) + return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED; + else + return ICE_DDP_PKG_SUCCESS; + } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || + hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { + return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED; + } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { + return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED; + } else { + return ICE_DDP_PKG_ERR; + } +} + +/** + * ice_init_pkg_regs - initialize additional package registers + * @hw: pointer to the hardware structure + */ +static void ice_init_pkg_regs(struct ice_hw *hw) +{ +#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF +#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF +#define ICE_SW_BLK_IDX 0 + + /* setup Switch block input mask, which is 48-bits in two parts */ + wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); + wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); +} + +/** + * ice_marker_ptype_tcam_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the Marker PType TCAM entry to be returned + * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual Marker PType TCAM entries. 
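+ * + * For example, ice_fill_hw_ptype() later in this file walks every Marker PType TCAM entry with it: + * + *	tcam = ice_pkg_enum_entry(seg, &state, + *				  ICE_SID_RXPARSER_MARKER_PTYPE, NULL, + *				  ice_marker_ptype_tcam_handler); 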
+ */ +static void *ice_marker_ptype_tcam_handler(u32 sect_type, void *section, + u32 index, u32 *offset) +{ + struct ice_marker_ptype_tcam_section *marker_ptype; + + if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE) + return NULL; + + if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + marker_ptype = section; + if (index >= le16_to_cpu(marker_ptype->count)) + return NULL; + + return marker_ptype->tcam + index; +} + +/** + * ice_add_dvm_hint + * @hw: pointer to the HW structure + * @val: value of the boost entry + * @enable: true if entry needs to be enabled, or false if it needs to be disabled + */ +static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable) +{ + if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) { + hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val; + hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable; + hw->dvm_upd.count++; + } +} + +/** + * ice_add_tunnel_hint + * @hw: pointer to the HW structure + * @label_name: label text + * @val: value of the tunnel port boost entry + */ +static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val) +{ + if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { + u16 i; + + for (i = 0; tnls[i].type != TNL_LAST; i++) { + size_t len = strlen(tnls[i].label_prefix); + + /* Look for a matching label start before continuing */ + if (strncmp(label_name, tnls[i].label_prefix, len)) + continue; + + /* Make sure this label matches our PF. Note that the PF + * character ('0' - '7') will be located where our + * prefix string's null terminator is located. + */ + if ((label_name[len] - '0') == hw->pf_id) { + hw->tnl.tbl[hw->tnl.count].type = tnls[i].type; + hw->tnl.tbl[hw->tnl.count].valid = false; + hw->tnl.tbl[hw->tnl.count].boost_addr = val; + hw->tnl.tbl[hw->tnl.count].port = 0; + hw->tnl.count++; + break; + } + } + } +} + +/** + * ice_label_enum_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the label entry to be returned + * @offset: pointer to receive absolute offset, always zero for label sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual label entries. + */ +static void *ice_label_enum_handler(u32 __always_unused sect_type, + void *section, u32 index, u32 *offset) +{ + struct ice_label_section *labels; + + if (!section) + return NULL; + + if (index > ICE_MAX_LABELS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + labels = section; + if (index >= le16_to_cpu(labels->count)) + return NULL; + + return labels->label + index; +} + +/** + * ice_enum_labels + * @ice_seg: pointer to the ice segment (NULL on subsequent calls) + * @type: the section type that will contain the label (0 on subsequent calls) + * @state: ice_pkg_enum structure that will hold the state of the enumeration + * @value: pointer to a value that will return the label's value if found + * + * Enumerates a list of labels in the package. The caller will call + * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call + * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL, + * the end of the list has been reached. 
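+ * + * For example, ice_init_pkg_hints() later in this file scans the parser TMEM labels this way (a condensed sketch): + * + *	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, + *				     &state, &val); + *	while (label_name) { + *		(match label_name against the known prefixes) + *		label_name = ice_enum_labels(NULL, 0, &state, &val); + *	} 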
+ */ +static char *ice_enum_labels(struct ice_seg *ice_seg, u32 type, + struct ice_pkg_enum *state, u16 *value) +{ + struct ice_label *label; + + /* Check for valid label section on first call */ + if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) + return NULL; + + label = ice_pkg_enum_entry(ice_seg, state, type, NULL, + ice_label_enum_handler); + if (!label) + return NULL; + + *value = le16_to_cpu(label->value); + return label->name; +} + +/** + * ice_boost_tcam_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the boost TCAM entry to be returned + * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual boost TCAM entries. + */ +static void *ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, + u32 *offset) +{ + struct ice_boost_tcam_section *boost; + + if (!section) + return NULL; + + if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) + return NULL; + + if (index > ICE_MAX_BST_TCAMS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + boost = section; + if (index >= le16_to_cpu(boost->count)) + return NULL; + + return boost->tcam + index; +} + +/** + * ice_find_boost_entry + * @ice_seg: pointer to the ice segment (non-NULL) + * @addr: Boost TCAM address of entry to search for + * @entry: returns pointer to the entry + * + * Finds a particular Boost TCAM entry and returns a pointer to that entry + * if it is found. The ice_seg parameter must not be NULL since the first call + * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. + */ +static int ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, + struct ice_boost_tcam_entry **entry) +{ + struct ice_boost_tcam_entry *tcam; + struct ice_pkg_enum state; + + memset(&state, 0, sizeof(state)); + + if (!ice_seg) + return -EINVAL; + + do { + tcam = ice_pkg_enum_entry(ice_seg, &state, + ICE_SID_RXPARSER_BOOST_TCAM, NULL, + ice_boost_tcam_handler); + if (tcam && le16_to_cpu(tcam->addr) == addr) { + *entry = tcam; + return 0; + } + + ice_seg = NULL; + } while (tcam); + + *entry = NULL; + return -EIO; +} + +/** + * ice_is_init_pkg_successful - check if DDP init was successful + * @state: state of the DDP pkg after download + */ +bool ice_is_init_pkg_successful(enum ice_ddp_state state) +{ + switch (state) { + case ICE_DDP_PKG_SUCCESS: + case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: + case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: + return true; + default: + return false; + } +} + +/** + * ice_pkg_buf_alloc + * @hw: pointer to the HW structure + * + * Allocates a package buffer and returns a pointer to the buffer header. + * Note: all package contents must be in Little Endian form. 
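+ * + * A typical build/teardown sequence, sketched (compare ice_pkg_buf_alloc_single_section() below; error handling elided): + * + *	bld = ice_pkg_buf_alloc(hw); + *	ice_pkg_buf_reserve_section(bld, 1); + *	sect = ice_pkg_buf_alloc_section(bld, type, size); + *	(fill *sect, then hand ice_pkg_buf(bld) to an update/download call) + *	ice_pkg_buf_free(hw, bld); 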
+ */ +struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) +{ + struct ice_buf_build *bld; + struct ice_buf_hdr *buf; + + bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL); + if (!bld) + return NULL; + + buf = (struct ice_buf_hdr *)bld; + buf->data_end = + cpu_to_le16(offsetof(struct ice_buf_hdr, section_entry)); + return bld; +} + +static bool ice_is_gtp_u_profile(u16 prof_idx) +{ + return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID && + prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) || + prof_idx == ICE_PROFID_IPV4_GTPU_TEID; +} + +static bool ice_is_gtp_c_profile(u16 prof_idx) +{ + switch (prof_idx) { + case ICE_PROFID_IPV4_GTPC_TEID: + case ICE_PROFID_IPV4_GTPC_NO_TEID: + case ICE_PROFID_IPV6_GTPC_TEID: + case ICE_PROFID_IPV6_GTPC_NO_TEID: + return true; + default: + return false; + } +} + +/** + * ice_get_sw_prof_type - determine switch profile type + * @hw: pointer to the HW structure + * @fv: pointer to the switch field vector + * @prof_idx: profile index to check + */ +static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw, + struct ice_fv *fv, u32 prof_idx) +{ + u16 i; + + if (ice_is_gtp_c_profile(prof_idx)) + return ICE_PROF_TUN_GTPC; + + if (ice_is_gtp_u_profile(prof_idx)) + return ICE_PROF_TUN_GTPU; + + for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { + /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ + if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && + fv->ew[i].off == ICE_VNI_OFFSET) + return ICE_PROF_TUN_UDP; + + /* GRE tunnel will have GRE protocol */ + if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) + return ICE_PROF_TUN_GRE; + } + + return ICE_PROF_NON_TUN; +} + +/** + * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type + * @hw: pointer to hardware structure + * @req_profs: type of profiles requested + * @bm: pointer to memory for returning the bitmap of field vectors + */ +void ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, + unsigned long *bm) +{ + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + + if (req_profs == ICE_PROF_ALL) { + bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES); + return; + } + + memset(&state, 0, sizeof(state)); + bitmap_zero(bm, ICE_MAX_NUM_PROFILES); + ice_seg = hw->seg; + do { + enum ice_prof_type prof_type; + u32 offset; + + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + ice_seg = NULL; + + if (fv) { + /* Determine field vector type */ + prof_type = ice_get_sw_prof_type(hw, fv, offset); + + if (req_profs & prof_type) + set_bit((u16)offset, bm); + } + } while (fv); +} + +/** + * ice_get_sw_fv_list + * @hw: pointer to the HW structure + * @lkups: list of protocol types + * @bm: bitmap of field vectors to consider + * @fv_list: Head of a list + * + * Finds all the field vector entries from switch block that contain + * a given protocol ID and offset and returns a list of structures of type + * "ice_sw_fv_list_entry". Every structure in the list has a field vector + * definition and profile ID information + * NOTE: The caller of the function is responsible for freeing the memory + * allocated for every list entry. 
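+ * + * A caller releases the list the same way the error path below does: + * + *	list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) { + *		list_del(&fvl->list_entry); + *		devm_kfree(ice_hw_to_dev(hw), fvl); + *	} 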
+ */ +int ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups, + unsigned long *bm, struct list_head *fv_list) +{ + struct ice_sw_fv_list_entry *fvl; + struct ice_sw_fv_list_entry *tmp; + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + u32 offset; + + memset(&state, 0, sizeof(state)); + + if (!lkups->n_val_words || !hw->seg) + return -EINVAL; + + ice_seg = hw->seg; + do { + u16 i; + + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + if (!fv) + break; + ice_seg = NULL; + + /* If field vector is not in the bitmap list, then skip this + * profile. + */ + if (!test_bit((u16)offset, bm)) + continue; + + for (i = 0; i < lkups->n_val_words; i++) { + int j; + + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv->ew[j].prot_id == + lkups->fv_words[i].prot_id && + fv->ew[j].off == lkups->fv_words[i].off) + break; + if (j >= hw->blk[ICE_BLK_SW].es.fvw) + break; + if (i + 1 == lkups->n_val_words) { + fvl = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*fvl), GFP_KERNEL); + if (!fvl) + goto err; + fvl->fv_ptr = fv; + fvl->profile_id = offset; + list_add(&fvl->list_entry, fv_list); + break; + } + } + } while (fv); + if (list_empty(fv_list)) { + dev_warn(ice_hw_to_dev(hw), + "Required profiles not found in currently loaded DDP package"); + return -EIO; + } + + return 0; + +err: + list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) { + list_del(&fvl->list_entry); + devm_kfree(ice_hw_to_dev(hw), fvl); + } + + return -ENOMEM; +} + +/** + * ice_init_prof_result_bm - Initialize the profile result index bitmap + * @hw: pointer to hardware structure + */ +void ice_init_prof_result_bm(struct ice_hw *hw) +{ + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + + memset(&state, 0, sizeof(state)); + + if (!hw->seg) + return; + + ice_seg = hw->seg; + do { + u32 off; + u16 i; + + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &off, ice_sw_fv_handler); + ice_seg = NULL; + if (!fv) + break; + + bitmap_zero(hw->switch_info->prof_res_bm[off], + ICE_MAX_FV_WORDS); + + /* Determine empty field vector indices; these can be + * used for recipe results. Skip index 0, since it is + * always used for Switch ID. + */ + for (i = 1; i < ICE_MAX_FV_WORDS; i++) + if (fv->ew[i].prot_id == ICE_PROT_INVALID && + fv->ew[i].off == ICE_FV_OFFSET_INVAL) + set_bit(i, hw->switch_info->prof_res_bm[off]); + } while (fv); +} + +/** + * ice_pkg_buf_free + * @hw: pointer to the HW structure + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Frees a package buffer + */ +void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) +{ + devm_kfree(ice_hw_to_dev(hw), bld); +} + +/** + * ice_pkg_buf_reserve_section + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * @count: the number of sections to reserve + * + * Reserves one or more section table entries in a package buffer. This routine + * can be called multiple times as long as all calls are made before calling + * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been + * called, the number of sections that can be allocated can no longer be + * increased; not using all reserved sections is fine, but this will + * result in some wasted space in the buffer. + * Note: all package contents must be in Little Endian form. 
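+ * + * As a worked example: each reserved entry grows data_end by the size of one struct ice_section_entry (8 bytes), so starting from the empty-buffer data_end of 4 (offsetof(struct ice_buf_hdr, section_entry)), reserving two sections moves data_end from 4 to 20. 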
+ */ +int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) +{ + struct ice_buf_hdr *buf; + u16 section_count; + u16 data_end; + + if (!bld) + return -EINVAL; + + buf = (struct ice_buf_hdr *)&bld->buf; + + /* already an active section, can't increase table size */ + section_count = le16_to_cpu(buf->section_count); + if (section_count > 0) + return -EIO; + + if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) + return -EIO; + bld->reserved_section_table_entries += count; + + data_end = le16_to_cpu(buf->data_end) + + flex_array_size(buf, section_entry, count); + buf->data_end = cpu_to_le16(data_end); + + return 0; +} + +/** + * ice_pkg_buf_alloc_section + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * @type: the section type value + * @size: the size of the section to reserve (in bytes) + * + * Reserves memory in the buffer for a section's content and updates the + * buffers' status accordingly. This routine returns a pointer to the first + * byte of the section start within the buffer, which is used to fill in the + * section contents. + * Note: all package contents must be in Little Endian form. + */ +void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) +{ + struct ice_buf_hdr *buf; + u16 sect_count; + u16 data_end; + + if (!bld || !type || !size) + return NULL; + + buf = (struct ice_buf_hdr *)&bld->buf; + + /* check for enough space left in buffer */ + data_end = le16_to_cpu(buf->data_end); + + /* section start must align on 4 byte boundary */ + data_end = ALIGN(data_end, 4); + + if ((data_end + size) > ICE_MAX_S_DATA_END) + return NULL; + + /* check for more available section table entries */ + sect_count = le16_to_cpu(buf->section_count); + if (sect_count < bld->reserved_section_table_entries) { + void *section_ptr = ((u8 *)buf) + data_end; + + buf->section_entry[sect_count].offset = cpu_to_le16(data_end); + buf->section_entry[sect_count].size = cpu_to_le16(size); + buf->section_entry[sect_count].type = cpu_to_le32(type); + + data_end += size; + buf->data_end = cpu_to_le16(data_end); + + buf->section_count = cpu_to_le16(sect_count + 1); + return section_ptr; + } + + /* no free section table entries */ + return NULL; +} + +/** + * ice_pkg_buf_alloc_single_section + * @hw: pointer to the HW structure + * @type: the section type value + * @size: the size of the section to reserve (in bytes) + * @section: returns pointer to the section + * + * Allocates a package buffer with a single section. + * Note: all package contents must be in Little Endian form. + */ +struct ice_buf_build *ice_pkg_buf_alloc_single_section(struct ice_hw *hw, + u32 type, u16 size, + void **section) +{ + struct ice_buf_build *buf; + + if (!section) + return NULL; + + buf = ice_pkg_buf_alloc(hw); + if (!buf) + return NULL; + + if (ice_pkg_buf_reserve_section(buf, 1)) + goto ice_pkg_buf_alloc_single_section_err; + + *section = ice_pkg_buf_alloc_section(buf, type, size); + if (!*section) + goto ice_pkg_buf_alloc_single_section_err; + + return buf; + +ice_pkg_buf_alloc_single_section_err: + ice_pkg_buf_free(hw, buf); + return NULL; +} + +/** + * ice_pkg_buf_get_active_sections + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Returns the number of active sections. Before using the package buffer + * in an update package command, the caller should make sure that there is at + * least one active section - otherwise, the buffer is not legal and should + * not be used. 
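+ * + * A minimal guard, sketched (the -ENOENT choice is illustrative): + * + *	if (!ice_pkg_buf_get_active_sections(bld)) + *		return -ENOENT; (nothing was added to the buffer) + *	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); + * 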
+ * Note: all package contents must be in Little Endian form. + */ +u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) +{ + struct ice_buf_hdr *buf; + + if (!bld) + return 0; + + buf = (struct ice_buf_hdr *)&bld->buf; + return le16_to_cpu(buf->section_count); +} + +/** + * ice_pkg_buf + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Return a pointer to the buffer's header + */ +struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) +{ + if (!bld) + return NULL; + + return &bld->buf; +} + +static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err) +{ + switch (aq_err) { + case ICE_AQ_RC_ENOSEC: + case ICE_AQ_RC_EBADSIG: + return ICE_DDP_PKG_FILE_SIGNATURE_INVALID; + case ICE_AQ_RC_ESVN: + return ICE_DDP_PKG_FILE_REVISION_TOO_LOW; + case ICE_AQ_RC_EBADMAN: + case ICE_AQ_RC_EBADBUF: + return ICE_DDP_PKG_LOAD_ERROR; + default: + return ICE_DDP_PKG_ERR; + } +} + +/** + * ice_acquire_global_cfg_lock + * @hw: pointer to the HW structure + * @access: access type (read or write) + * + * This function will request ownership of the global config lock for reading + * or writing of the package. When attempting to obtain write access, the + * caller must check for the following two return values: + * + * 0 - Means the caller has acquired the global config lock + * and can perform writing of the package. + * -EALREADY - Indicates another driver has already written the + * package or has found that no update was necessary; in + * this case, the caller can just skip performing any + * update of the package. + */ +static int ice_acquire_global_cfg_lock(struct ice_hw *hw, + enum ice_aq_res_access_type access) +{ + int status; + + status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, + ICE_GLOBAL_CFG_LOCK_TIMEOUT); + + if (!status) + mutex_lock(&ice_global_cfg_lock_sw); + else if (status == -EALREADY) + ice_debug(hw, ICE_DBG_PKG, + "Global config lock: No work to do\n"); + + return status; +} + +/** + * ice_release_global_cfg_lock + * @hw: pointer to the HW structure + * + * This function will release the global config lock. + */ +static void ice_release_global_cfg_lock(struct ice_hw *hw) +{ + mutex_unlock(&ice_global_cfg_lock_sw); + ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); +} + +/** + * ice_dwnld_cfg_bufs + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains global config lock and downloads the package configuration buffers + * to the firmware. Metadata buffers are skipped, and the first metadata buffer + * found indicates that the rest of the buffers are all metadata buffers. + */ +static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw, + struct ice_buf *bufs, u32 count) +{ + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; + struct ice_buf_hdr *bh; + enum ice_aq_err err; + u32 offset, info, i; + int status; + + if (!bufs || !count) + return ICE_DDP_PKG_ERR; + + /* If the first buffer's first section has its metadata bit set + * then there are no buffers to be downloaded, and the operation is + * considered a success. 
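+ * (Metadata buffers carry package information only and are never sent to firmware.) 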
+ */ + bh = (struct ice_buf_hdr *)bufs; + if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) + return ICE_DDP_PKG_SUCCESS; + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == -EALREADY) + return ICE_DDP_PKG_ALREADY_LOADED; + return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); + } + + for (i = 0; i < count; i++) { + bool last = ((i + 1) == count); + + if (!last) { + /* check next buffer for metadata flag */ + bh = (struct ice_buf_hdr *)(bufs + i + 1); + + /* A set metadata flag in the next buffer will signal + * that the current buffer will be the last buffer + * downloaded + */ + if (le16_to_cpu(bh->section_count)) + if (le32_to_cpu(bh->section_entry[0].type) & + ICE_METADATA_BUF) + last = true; + } + + bh = (struct ice_buf_hdr *)(bufs + i); + + status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, + &offset, &info, NULL); + + /* Save AQ status from download package */ + if (status) { + ice_debug(hw, ICE_DBG_PKG, + "Pkg download failed: err %d off %d inf %d\n", + status, offset, info); + err = hw->adminq.sq_last_status; + state = ice_map_aq_err_to_ddp_state(err); + break; + } + + if (last) + break; + } + + if (!status) { + status = ice_set_vlan_mode(hw); + if (status) + ice_debug(hw, ICE_DBG_PKG, + "Failed to set VLAN mode: err %d\n", status); + } + + ice_release_global_cfg_lock(hw); + + return state; +} + +/** + * ice_aq_get_pkg_info_list + * @hw: pointer to the hardware structure + * @pkg_info: the buffer which will receive the information list + * @buf_size: the size of the pkg_info information buffer + * @cd: pointer to command details structure or NULL + * + * Get Package Info List (0x0C43) + */ +static int ice_aq_get_pkg_info_list(struct ice_hw *hw, + struct ice_aqc_get_pkg_info_resp *pkg_info, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); + + return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); +} + +/** + * ice_download_pkg + * @hw: pointer to the hardware structure + * @ice_seg: pointer to the segment of the package to be downloaded + * + * Handles the download of a complete package. 
+ */ +static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw, + struct ice_seg *ice_seg) +{ + struct ice_buf_table *ice_buf_tbl; + int status; + + ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", + ice_seg->hdr.seg_format_ver.major, + ice_seg->hdr.seg_format_ver.minor, + ice_seg->hdr.seg_format_ver.update, + ice_seg->hdr.seg_format_ver.draft); + + ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", + le32_to_cpu(ice_seg->hdr.seg_type), + le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); + + ice_buf_tbl = ice_find_buf_table(ice_seg); + + ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", + le32_to_cpu(ice_buf_tbl->buf_count)); + + status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, + le32_to_cpu(ice_buf_tbl->buf_count)); + + ice_post_pkg_dwnld_vlan_mode_cfg(hw); + + return status; +} + +/** + * ice_aq_download_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer to transfer + * @buf_size: the size of the package buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Download Package (0x0C40) + */ +int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, bool last_buf, u32 *error_offset, + u32 *error_info, struct ice_sq_cd *cd) +{ + struct ice_aqc_download_pkg *cmd; + struct ice_aq_desc desc; + int status; + + if (error_offset) + *error_offset = 0; + if (error_info) + *error_info = 0; + + cmd = &desc.params.download_pkg; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + if (last_buf) + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + if (status == -EIO) { + /* Read error from buffer only when the FW returned an error */ + struct ice_aqc_download_pkg_resp *resp; + + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; + if (error_offset) + *error_offset = le32_to_cpu(resp->error_offset); + if (error_info) + *error_info = le32_to_cpu(resp->error_info); + } + + return status; +} + +/** + * ice_aq_upload_section + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer which will receive the section + * @buf_size: the size of the package buffer + * @cd: pointer to command details structure or NULL + * + * Upload Section (0x0C41) + */ +int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); +} + +/** + * ice_aq_update_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package cmd buffer + * @buf_size: the size of the package cmd buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Update Package (0x0C42) + */ +static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, bool last_buf, u32 *error_offset, + u32 *error_info, struct ice_sq_cd *cd) +{ + struct ice_aqc_download_pkg *cmd; + struct ice_aq_desc desc; + int status; + + if (error_offset) + *error_offset = 0; + if (error_info) + *error_info = 0; + + cmd = &desc.params.download_pkg; + ice_fill_dflt_direct_cmd_desc(&desc, 
ice_aqc_opc_update_pkg); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + if (last_buf) + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + if (status == -EIO) { + /* Read error from buffer only when the FW returned an error */ + struct ice_aqc_download_pkg_resp *resp; + + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; + if (error_offset) + *error_offset = le32_to_cpu(resp->error_offset); + if (error_info) + *error_info = le32_to_cpu(resp->error_info); + } + + return status; +} + +/** + * ice_update_pkg_no_lock + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + */ +int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + int status = 0; + u32 i; + + for (i = 0; i < count; i++) { + struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); + bool last = ((i + 1) == count); + u32 offset, info; + + status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end), + last, &offset, &info, NULL); + + if (status) { + ice_debug(hw, ICE_DBG_PKG, + "Update pkg failed: err %d off %d inf %d\n", + status, offset, info); + break; + } + } + + return status; +} + +/** + * ice_update_pkg + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains change lock and updates package. + */ +int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + int status; + + status = ice_acquire_change_lock(hw, ICE_RES_WRITE); + if (status) + return status; + + status = ice_update_pkg_no_lock(hw, bufs, count); + + ice_release_change_lock(hw); + + return status; +} + +/** + * ice_find_seg_in_pkg + * @hw: pointer to the hardware structure + * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) + * @pkg_hdr: pointer to the package header to be searched + * + * This function searches a package file for a particular segment type. On + * success it returns a pointer to the segment header, otherwise it will + * return NULL. + */ +struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, + struct ice_pkg_hdr *pkg_hdr) +{ + u32 i; + + ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", + pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, + pkg_hdr->pkg_format_ver.update, + pkg_hdr->pkg_format_ver.draft); + + /* Search all package segments for the requested segment type */ + for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { + struct ice_generic_seg_hdr *seg; + + seg = (struct ice_generic_seg_hdr + *)((u8 *)pkg_hdr + + le32_to_cpu(pkg_hdr->seg_offset[i])); + + if (le32_to_cpu(seg->seg_type) == seg_type) + return seg; + } + + return NULL; +} + +/** + * ice_init_pkg_info + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to the driver's package hdr + * + * Saves off the package details into the HW structure. 
+ */ +static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw, + struct ice_pkg_hdr *pkg_hdr) +{ + struct ice_generic_seg_hdr *seg_hdr; + + if (!pkg_hdr) + return ICE_DDP_PKG_ERR; + + seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); + if (seg_hdr) { + struct ice_meta_sect *meta; + struct ice_pkg_enum state; + + memset(&state, 0, sizeof(state)); + + /* Get package information from the Metadata Section */ + meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, + ICE_SID_METADATA); + if (!meta) { + ice_debug(hw, ICE_DBG_INIT, + "Did not find ice metadata section in package\n"); + return ICE_DDP_PKG_INVALID_FILE; + } + + hw->pkg_ver = meta->ver; + memcpy(hw->pkg_name, meta->name, sizeof(meta->name)); + + ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", + meta->ver.major, meta->ver.minor, meta->ver.update, + meta->ver.draft, meta->name); + + hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; + memcpy(hw->ice_seg_id, seg_hdr->seg_id, sizeof(hw->ice_seg_id)); + + ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", + seg_hdr->seg_format_ver.major, + seg_hdr->seg_format_ver.minor, + seg_hdr->seg_format_ver.update, + seg_hdr->seg_format_ver.draft, seg_hdr->seg_id); + } else { + ice_debug(hw, ICE_DBG_INIT, + "Did not find ice segment in driver package\n"); + return ICE_DDP_PKG_INVALID_FILE; + } + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_get_pkg_info + * @hw: pointer to the hardware structure + * + * Store details of the package currently loaded in HW into the HW structure. + */ +static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) +{ + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; + struct ice_aqc_get_pkg_info_resp *pkg_info; + u16 size; + u32 i; + + size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT); + pkg_info = kzalloc(size, GFP_KERNEL); + if (!pkg_info) + return ICE_DDP_PKG_ERR; + + if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) { + state = ICE_DDP_PKG_ERR; + goto init_pkg_free_alloc; + } + + for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { +#define ICE_PKG_FLAG_COUNT 4 + char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; + u8 place = 0; + + if (pkg_info->pkg_info[i].is_active) { + flags[place++] = 'A'; + hw->active_pkg_ver = pkg_info->pkg_info[i].ver; + hw->active_track_id = + le32_to_cpu(pkg_info->pkg_info[i].track_id); + memcpy(hw->active_pkg_name, pkg_info->pkg_info[i].name, + sizeof(pkg_info->pkg_info[i].name)); + hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; + } + if (pkg_info->pkg_info[i].is_active_at_boot) + flags[place++] = 'B'; + if (pkg_info->pkg_info[i].is_modified) + flags[place++] = 'M'; + if (pkg_info->pkg_info[i].is_in_nvm) + flags[place++] = 'N'; + + ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", i, + pkg_info->pkg_info[i].ver.major, + pkg_info->pkg_info[i].ver.minor, + pkg_info->pkg_info[i].ver.update, + pkg_info->pkg_info[i].ver.draft, + pkg_info->pkg_info[i].name, flags); + } + +init_pkg_free_alloc: + kfree(pkg_info); + + return state; +} + +/** + * ice_chk_pkg_compat + * @hw: pointer to the hardware structure + * @ospkg: pointer to the package hdr + * @seg: pointer to the package segment hdr + * + * This function checks the package version compatibility with driver and NVM + */ +static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw, + struct ice_pkg_hdr *ospkg, + struct ice_seg **seg) +{ + struct ice_aqc_get_pkg_info_resp *pkg; + enum ice_ddp_state state; + u16 size; + u32 i; + + /* Check package version compatibility */ + state = ice_chk_pkg_version(&hw->pkg_ver); + if (state) { + 
ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); + return state; + } + + /* find ICE segment in given package */ + *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, + ospkg); + if (!*seg) { + ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); + return ICE_DDP_PKG_INVALID_FILE; + } + + /* Check if FW is compatible with the OS package */ + size = struct_size(pkg, pkg_info, ICE_PKG_CNT); + pkg = kzalloc(size, GFP_KERNEL); + if (!pkg) + return ICE_DDP_PKG_ERR; + + if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) { + state = ICE_DDP_PKG_LOAD_ERROR; + goto fw_ddp_compat_free_alloc; + } + + for (i = 0; i < le32_to_cpu(pkg->count); i++) { + /* loop till we find the NVM package */ + if (!pkg->pkg_info[i].is_in_nvm) + continue; + if ((*seg)->hdr.seg_format_ver.major != + pkg->pkg_info[i].ver.major || + (*seg)->hdr.seg_format_ver.minor > + pkg->pkg_info[i].ver.minor) { + state = ICE_DDP_PKG_FW_MISMATCH; + ice_debug(hw, ICE_DBG_INIT, + "OS package is not compatible with NVM.\n"); + } + /* done processing NVM package so break */ + break; + } +fw_ddp_compat_free_alloc: + kfree(pkg); + return state; +} + +/** + * ice_init_pkg_hints + * @hw: pointer to the HW structure + * @ice_seg: pointer to the segment of the package scan (non-NULL) + * + * This function will scan the package and save off relevant information + * (hints or metadata) for driver use. The ice_seg parameter must not be NULL + * since the first call to ice_enum_labels requires a pointer to an actual + * ice_seg structure. + */ +static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) +{ + struct ice_pkg_enum state; + char *label_name; + u16 val; + int i; + + memset(&hw->tnl, 0, sizeof(hw->tnl)); + memset(&state, 0, sizeof(state)); + + if (!ice_seg) + return; + + label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, + &val); + + while (label_name) { + if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE))) + /* check for a tunnel entry */ + ice_add_tunnel_hint(hw, label_name, val); + + /* check for a dvm mode entry */ + else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE))) + ice_add_dvm_hint(hw, val, true); + + /* check for a svm mode entry */ + else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE))) + ice_add_dvm_hint(hw, val, false); + + label_name = ice_enum_labels(NULL, 0, &state, &val); + } + + /* Cache the appropriate boost TCAM entry pointers for tunnels */ + for (i = 0; i < hw->tnl.count; i++) { + ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, + &hw->tnl.tbl[i].boost_entry); + if (hw->tnl.tbl[i].boost_entry) { + hw->tnl.tbl[i].valid = true; + if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT) + hw->tnl.valid_count[hw->tnl.tbl[i].type]++; + } + } + + /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */ + for (i = 0; i < hw->dvm_upd.count; i++) + ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr, + &hw->dvm_upd.tbl[i].boost_entry); +} + +/** + * ice_fill_hw_ptype - fill the enabled PTYPE bit information + * @hw: pointer to the HW structure + */ +static void ice_fill_hw_ptype(struct ice_hw *hw) +{ + struct ice_marker_ptype_tcam_entry *tcam; + struct ice_seg *seg = hw->seg; + struct ice_pkg_enum state; + + bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX); + if (!seg) + return; + + memset(&state, 0, sizeof(state)); + + do { + tcam = ice_pkg_enum_entry(seg, &state, + ICE_SID_RXPARSER_MARKER_PTYPE, NULL, + ice_marker_ptype_tcam_handler); + if (tcam && + le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX && 
+ le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX) + set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype); + + seg = NULL; + } while (tcam); +} + +/** + * ice_init_pkg - initialize/download package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function initializes a package. The package contains HW tables + * required to do packet processing. First, the function extracts package + * information such as version. Then it finds the ice configuration segment + * within the package; this function then saves a copy of the segment pointer + * within the supplied package buffer. Next, the function will cache any hints + * from the package, followed by downloading the package itself. Note that if + * a previous PF driver has already downloaded the package successfully, then + * the current driver will not have to download the package again. + * + * The local package contents will be used to query default behavior and to + * update specific sections of the HW's version of the package (e.g. to update + * the parse graph to understand new protocols). + * + * This function stores a pointer to the package buffer memory, and it is + * expected that the supplied buffer will not be freed immediately. If the + * package buffer needs to be freed, such as when read from a file, use + * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this + * case. + */ +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) +{ + bool already_loaded = false; + enum ice_ddp_state state; + struct ice_pkg_hdr *pkg; + struct ice_seg *seg; + + if (!buf || !len) + return ICE_DDP_PKG_ERR; + + pkg = (struct ice_pkg_hdr *)buf; + state = ice_verify_pkg(pkg, len); + if (state) { + ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", + state); + return state; + } + + /* initialize package info */ + state = ice_init_pkg_info(hw, pkg); + if (state) + return state; + + /* before downloading the package, check package version for + * compatibility with driver + */ + state = ice_chk_pkg_compat(hw, pkg, &seg); + if (state) + return state; + + /* initialize package hints and then download package */ + ice_init_pkg_hints(hw, seg); + state = ice_download_pkg(hw, seg); + if (state == ICE_DDP_PKG_ALREADY_LOADED) { + ice_debug(hw, ICE_DBG_INIT, + "package previously loaded - no work.\n"); + already_loaded = true; + } + + /* Get information on the package currently loaded in HW, then make sure + * the driver is compatible with this version. + */ + if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) { + state = ice_get_pkg_info(hw); + if (!state) + state = ice_get_ddp_pkg_state(hw, already_loaded); + } + + if (ice_is_init_pkg_successful(state)) { + hw->seg = seg; + /* on successful package download, update other required + * registers to support the package and fill HW tables + * with package content. + */ + ice_init_pkg_regs(hw); + ice_fill_blk_tbls(hw); + ice_fill_hw_ptype(hw); + ice_get_prof_index_max(hw); + } else { + ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", state); + } + + return state; +} + +/** + * ice_copy_and_init_pkg - initialize/download a copy of the package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function copies the package buffer, and then calls ice_init_pkg() to + * initialize the copied package contents. 
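+ * (For instance, a DDP image obtained with request_firmware() arrives as a const buffer that the kernel releases after use, so it must be copied before being handed to ice_init_pkg().) 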
+ * + * The copying is necessary if the package buffer supplied is constant, or if + * the memory may disappear shortly after calling this function. + * + * If the package buffer resides in the data segment and can be modified, the + * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). + * + * However, if the package buffer needs to be copied first, such as when being + * read from a file, the caller should use ice_copy_and_init_pkg(). + * + * This function will first copy the package buffer, before calling + * ice_init_pkg(). The caller is free to immediately destroy the original + * package buffer, as the new copy will be managed by this function and + * related routines. + */ +enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, + u32 len) +{ + enum ice_ddp_state state; + u8 *buf_copy; + + if (!buf || !len) + return ICE_DDP_PKG_ERR; + + buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); + + state = ice_init_pkg(hw, buf_copy, len); + if (!ice_is_init_pkg_successful(state)) { + /* Free the copy, since we failed to initialize the package */ + devm_kfree(ice_hw_to_dev(hw), buf_copy); + } else { + /* Track the copied pkg so we can free it later */ + hw->pkg_copy = buf_copy; + hw->pkg_size = len; + } + + return state; +} diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h new file mode 100644 index 000000000000..37eadb3d27a8 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_ddp.h @@ -0,0 +1,445 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2022, Intel Corporation. */ + +#ifndef _ICE_DDP_H_ +#define _ICE_DDP_H_ + +#include "ice_type.h" + +/* Package minimal version supported */ +#define ICE_PKG_SUPP_VER_MAJ 1 +#define ICE_PKG_SUPP_VER_MNR 3 + +/* Package format version */ +#define ICE_PKG_FMT_VER_MAJ 1 +#define ICE_PKG_FMT_VER_MNR 0 +#define ICE_PKG_FMT_VER_UPD 0 +#define ICE_PKG_FMT_VER_DFT 0 + +#define ICE_PKG_CNT 4 + +#define ICE_FV_OFFSET_INVAL 0x1FF + +/* Extraction Sequence (Field Vector) Table */ +struct ice_fv_word { + u8 prot_id; + u16 off; /* Offset within the protocol header */ + u8 resvrd; +} __packed; + +#define ICE_MAX_NUM_PROFILES 256 + +#define ICE_MAX_FV_WORDS 48 +struct ice_fv { + struct ice_fv_word ew[ICE_MAX_FV_WORDS]; +}; + +enum ice_ddp_state { + /* Indicates that this call to ice_init_pkg + * successfully loaded the requested DDP package + */ + ICE_DDP_PKG_SUCCESS = 0, + + /* Generic error for already loaded errors, it is mapped later to + * the more specific one (one of the next 3) + */ + ICE_DDP_PKG_ALREADY_LOADED = -1, + + /* Indicates that a DDP package of the same version has already been + * loaded onto the device by a previous call or by another PF + */ + ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2, + + /* The device has a DDP package that is not supported by the driver */ + ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3, + + /* The device has a compatible package + * (but different from the request) already loaded + */ + ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4, + + /* The firmware loaded on the device is not compatible with + * the DDP package loaded + */ + ICE_DDP_PKG_FW_MISMATCH = -5, + + /* The DDP package file is invalid */ + ICE_DDP_PKG_INVALID_FILE = -6, + + /* The version of the DDP package provided is higher than + * the driver supports + */ + ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7, + + /* The version of the DDP package provided is lower than the + * driver supports + */ + ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8, + + /* The 
signature of the DDP package file provided is invalid */ + ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9, + + /* The DDP package file security revision is too low and not + * supported by firmware + */ + ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10, + + /* An error occurred in firmware while loading the DDP package */ + ICE_DDP_PKG_LOAD_ERROR = -11, + + /* Other errors */ + ICE_DDP_PKG_ERR = -12 +}; + +/* Package and segment headers and tables */ +struct ice_pkg_hdr { + struct ice_pkg_ver pkg_format_ver; + __le32 seg_count; + __le32 seg_offset[]; +}; + +/* generic segment */ +struct ice_generic_seg_hdr { +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_ICE 0x00000010 + __le32 seg_type; + struct ice_pkg_ver seg_format_ver; + __le32 seg_size; + char seg_id[ICE_PKG_NAME_SIZE]; +}; + +/* ice specific segment */ + +union ice_device_id { + struct { + __le16 device_id; + __le16 vendor_id; + } dev_vend_id; + __le32 id; +}; + +struct ice_device_id_entry { + union ice_device_id device; + union ice_device_id sub_device; +}; + +struct ice_seg { + struct ice_generic_seg_hdr hdr; + __le32 device_table_count; + struct ice_device_id_entry device_table[]; +}; + +struct ice_nvm_table { + __le32 table_count; + __le32 vers[]; +}; + +struct ice_buf { +#define ICE_PKG_BUF_SIZE 4096 + u8 buf[ICE_PKG_BUF_SIZE]; +}; + +struct ice_buf_table { + __le32 buf_count; + struct ice_buf buf_array[]; +}; + +struct ice_run_time_cfg_seg { + struct ice_generic_seg_hdr hdr; + u8 rsvd[8]; + struct ice_buf_table buf_table; +}; + +/* global metadata specific segment */ +struct ice_global_metadata_seg { + struct ice_generic_seg_hdr hdr; + struct ice_pkg_ver pkg_ver; + __le32 rsvd; + char pkg_name[ICE_PKG_NAME_SIZE]; +}; + +#define ICE_MIN_S_OFF 12 +#define ICE_MAX_S_OFF 4095 +#define ICE_MIN_S_SZ 1 +#define ICE_MAX_S_SZ 4084 + +/* section information */ +struct ice_section_entry { + __le32 type; + __le16 offset; + __le16 size; +}; + +#define ICE_MIN_S_COUNT 1 +#define ICE_MAX_S_COUNT 511 +#define ICE_MIN_S_DATA_END 12 +#define ICE_MAX_S_DATA_END 4096 + +#define ICE_METADATA_BUF 0x80000000 + +struct ice_buf_hdr { + __le16 section_count; + __le16 data_end; + struct ice_section_entry section_entry[]; +}; + +#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) \ + ((ICE_PKG_BUF_SIZE - \ + struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) / \ + (ent_sz)) + +/* ice package section IDs */ +#define ICE_SID_METADATA 1 +#define ICE_SID_XLT0_SW 10 +#define ICE_SID_XLT_KEY_BUILDER_SW 11 +#define ICE_SID_XLT1_SW 12 +#define ICE_SID_XLT2_SW 13 +#define ICE_SID_PROFID_TCAM_SW 14 +#define ICE_SID_PROFID_REDIR_SW 15 +#define ICE_SID_FLD_VEC_SW 16 +#define ICE_SID_CDID_KEY_BUILDER_SW 17 + +struct ice_meta_sect { + struct ice_pkg_ver ver; +#define ICE_META_SECT_NAME_SIZE 28 + char name[ICE_META_SECT_NAME_SIZE]; + __le32 track_id; +}; + +#define ICE_SID_CDID_REDIR_SW 18 + +#define ICE_SID_XLT0_ACL 20 +#define ICE_SID_XLT_KEY_BUILDER_ACL 21 +#define ICE_SID_XLT1_ACL 22 +#define ICE_SID_XLT2_ACL 23 +#define ICE_SID_PROFID_TCAM_ACL 24 +#define ICE_SID_PROFID_REDIR_ACL 25 +#define ICE_SID_FLD_VEC_ACL 26 +#define ICE_SID_CDID_KEY_BUILDER_ACL 27 +#define ICE_SID_CDID_REDIR_ACL 28 + +#define ICE_SID_XLT0_FD 30 +#define ICE_SID_XLT_KEY_BUILDER_FD 31 +#define ICE_SID_XLT1_FD 32 +#define ICE_SID_XLT2_FD 33 +#define ICE_SID_PROFID_TCAM_FD 34 +#define ICE_SID_PROFID_REDIR_FD 35 +#define ICE_SID_FLD_VEC_FD 36 +#define ICE_SID_CDID_KEY_BUILDER_FD 37 +#define ICE_SID_CDID_REDIR_FD 38 + +#define ICE_SID_XLT0_RSS 40 +#define ICE_SID_XLT_KEY_BUILDER_RSS 41 
+#define ICE_SID_XLT1_RSS 42 +#define ICE_SID_XLT2_RSS 43 +#define ICE_SID_PROFID_TCAM_RSS 44 +#define ICE_SID_PROFID_REDIR_RSS 45 +#define ICE_SID_FLD_VEC_RSS 46 +#define ICE_SID_CDID_KEY_BUILDER_RSS 47 +#define ICE_SID_CDID_REDIR_RSS 48 + +#define ICE_SID_RXPARSER_MARKER_PTYPE 55 +#define ICE_SID_RXPARSER_BOOST_TCAM 56 +#define ICE_SID_RXPARSER_METADATA_INIT 58 +#define ICE_SID_TXPARSER_BOOST_TCAM 66 + +#define ICE_SID_XLT0_PE 80 +#define ICE_SID_XLT_KEY_BUILDER_PE 81 +#define ICE_SID_XLT1_PE 82 +#define ICE_SID_XLT2_PE 83 +#define ICE_SID_PROFID_TCAM_PE 84 +#define ICE_SID_PROFID_REDIR_PE 85 +#define ICE_SID_FLD_VEC_PE 86 +#define ICE_SID_CDID_KEY_BUILDER_PE 87 +#define ICE_SID_CDID_REDIR_PE 88 + +/* Label Metadata section IDs */ +#define ICE_SID_LBL_FIRST 0x80000010 +#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 +/* The following define MUST be updated to reflect the last label section ID */ +#define ICE_SID_LBL_LAST 0x80000038 + +/* Label ICE runtime configuration section IDs */ +#define ICE_SID_TX_5_LAYER_TOPO 0x10 + +enum ice_block { + ICE_BLK_SW = 0, + ICE_BLK_ACL, + ICE_BLK_FD, + ICE_BLK_RSS, + ICE_BLK_PE, + ICE_BLK_COUNT +}; + +enum ice_sect { + ICE_XLT0 = 0, + ICE_XLT_KB, + ICE_XLT1, + ICE_XLT2, + ICE_PROF_TCAM, + ICE_PROF_REDIR, + ICE_VEC_TBL, + ICE_CDID_KB, + ICE_CDID_REDIR, + ICE_SECT_COUNT +}; + +/* package labels */ +struct ice_label { + __le16 value; +#define ICE_PKG_LABEL_SIZE 64 + char name[ICE_PKG_LABEL_SIZE]; +}; + +struct ice_label_section { + __le16 count; + struct ice_label label[]; +}; + +#define ICE_MAX_LABELS_IN_BUF \ + ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_label_section *)0, \ + label, 1) - \ + sizeof(struct ice_label), \ + sizeof(struct ice_label)) + +struct ice_sw_fv_section { + __le16 count; + __le16 base_offset; + struct ice_fv fv[]; +}; + +struct ice_sw_fv_list_entry { + struct list_head list_entry; + u32 profile_id; + struct ice_fv *fv_ptr; +}; + +/* The BOOST TCAM stores the match packet header in reverse order, meaning + * the fields are reversed; in addition, this means that the normally big endian + * fields of the packet are now little endian. + */ +struct ice_boost_key_value { +#define ICE_BOOST_REMAINING_HV_KEY 15 + u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY]; + __le16 hv_dst_port_key; + __le16 hv_src_port_key; + u8 tcam_search_key; +} __packed; + +struct ice_boost_key { + struct ice_boost_key_value key; + struct ice_boost_key_value key2; +}; + +/* package Boost TCAM entry */ +struct ice_boost_tcam_entry { + __le16 addr; + __le16 reserved; + /* break up the 40 bytes of key into different fields */ + struct ice_boost_key key; + u8 boost_hit_index_group; + /* The following contains bitfields which are not on byte boundaries. + * These fields are currently unused by driver software. 
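The ICE_MAX_ENTRIES_IN_BUF() macro above budgets one 4096-byte package buffer: subtract the buffer header (sized for one section entry) plus the section's own header, then divide by the entry size. A standalone model of the same arithmetic for the label section, using offsetof() in place of the kernel's struct_size() (struct layouts simplified for illustration):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PKG_BUF_SIZE 4096

struct section_entry { uint32_t type; uint16_t offset; uint16_t size; };

struct buf_hdr {
	uint16_t section_count;
	uint16_t data_end;
	struct section_entry section_entry[];	/* flexible array member */
};

struct label {
	uint16_t value;
	char name[64];
};

struct label_section {
	uint16_t count;
	struct label label[];
};

int main(void)
{
	/* header with room for exactly one section entry */
	size_t buf_hdr_sz = offsetof(struct buf_hdr, section_entry[1]);
	/* section header alone: struct_size(sec, label, 1) - one label */
	size_t sect_hdr_sz = offsetof(struct label_section, label[1]) -
			     sizeof(struct label);
	size_t max = (PKG_BUF_SIZE - buf_hdr_sz - sect_hdr_sz) /
		     sizeof(struct label);

	printf("max labels per buffer: %zu\n", max);
	return 0;
}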
+ */ +#define ICE_BOOST_BIT_FIELDS 43 + u8 bit_fields[ICE_BOOST_BIT_FIELDS]; +}; + +struct ice_boost_tcam_section { + __le16 count; + __le16 reserved; + struct ice_boost_tcam_entry tcam[]; +}; + +#define ICE_MAX_BST_TCAMS_IN_BUF \ + ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_boost_tcam_section *)0, \ + tcam, 1) - \ + sizeof(struct ice_boost_tcam_entry), \ + sizeof(struct ice_boost_tcam_entry)) + +/* package Marker Ptype TCAM entry */ +struct ice_marker_ptype_tcam_entry { +#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024 + __le16 addr; + __le16 ptype; + u8 keys[20]; +}; + +struct ice_marker_ptype_tcam_section { + __le16 count; + __le16 reserved; + struct ice_marker_ptype_tcam_entry tcam[]; +}; + +#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \ + ICE_MAX_ENTRIES_IN_BUF( \ + struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, \ + 1) - \ + sizeof(struct ice_marker_ptype_tcam_entry), \ + sizeof(struct ice_marker_ptype_tcam_entry)) + +struct ice_xlt1_section { + __le16 count; + __le16 offset; + u8 value[]; +}; + +struct ice_xlt2_section { + __le16 count; + __le16 offset; + __le16 value[]; +}; + +struct ice_prof_redir_section { + __le16 count; + __le16 offset; + u8 redir_value[]; +}; + +/* package buffer building */ + +struct ice_buf_build { + struct ice_buf buf; + u16 reserved_section_table_entries; +}; + +struct ice_pkg_enum { + struct ice_buf_table *buf_table; + u32 buf_idx; + + u32 type; + struct ice_buf_hdr *buf; + u32 sect_idx; + void *sect; + u32 sect_type; + + u32 entry_idx; + void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); +}; + +int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, bool last_buf, u32 *error_offset, + u32 *error_info, struct ice_sq_cd *cd); +int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, struct ice_sq_cd *cd); + +void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size); + +enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len); + +struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw); + +struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, + struct ice_pkg_hdr *pkg_hdr); + +int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count); +int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count); + +int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count); +u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld); +void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type); + +struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf); + +#endif diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 8286e47b4bae..05f216af8c81 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -371,10 +371,7 @@ out_free_ctx: /** * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware - * @devlink: pointer to the devlink instance to reload - * @netns_change: if true, the network namespace is changing - * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE - * @limit: limits on what reload should do, such as not resetting + * @pf: pointer to the pf instance * @extack: netlink extended ACK structure * * Allow user to activate new Embedded Management Processor firmware by @@ -387,12 +384,9 @@ out_free_ctx: * any source. 
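The ice_devlink.c changes that follow split devlink reload into a down/up pair dispatched on the requested action, and refuse DRIVER_REINIT while switchdev, ADQ, or VFs are still active. A toy dispatch under those assumptions (types and helpers invented for illustration, not the devlink API):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum reload_action { ACTION_DRIVER_REINIT, ACTION_FW_ACTIVATE };

struct pf_state {
	bool switchdev_active;
	bool adq_active;
	bool has_vfs;
};

static int reload_down(struct pf_state *pf, enum reload_action action)
{
	switch (action) {
	case ACTION_DRIVER_REINIT:
		/* reinit is refused until conflicting features are torn down */
		if (pf->switchdev_active || pf->adq_active || pf->has_vfs)
			return -EOPNOTSUPP;
		printf("unloading driver state\n");
		return 0;
	case ACTION_FW_ACTIVATE:
		printf("starting EMP reset\n");
		return 0;
	}
	return -EOPNOTSUPP;
}

int main(void)
{
	struct pf_state pf = { .has_vfs = true };

	printf("reinit -> %d\n", reload_down(&pf, ACTION_DRIVER_REINIT));
	pf.has_vfs = false;
	printf("reinit -> %d\n", reload_down(&pf, ACTION_DRIVER_REINIT));
	return 0;
}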
*/ static int -ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change, - enum devlink_reload_action action, - enum devlink_reload_limit limit, +ice_devlink_reload_empr_start(struct ice_pf *pf, struct netlink_ext_ack *extack) { - struct ice_pf *pf = devlink_priv(devlink); struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; u8 pending; @@ -431,11 +425,51 @@ ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change, } /** + * ice_devlink_reload_down - prepare for reload + * @devlink: pointer to the devlink instance to reload + * @netns_change: if true, the network namespace is changing + * @action: the action to perform + * @limit: limits on what reload should do, such as not resetting + * @extack: netlink extended ACK structure + */ +static int +ice_devlink_reload_down(struct devlink *devlink, bool netns_change, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: + if (ice_is_eswitch_mode_switchdev(pf)) { + NL_SET_ERR_MSG_MOD(extack, + "Go to legacy mode before doing reinit"); + return -EOPNOTSUPP; + } + if (ice_is_adq_active(pf)) { + NL_SET_ERR_MSG_MOD(extack, + "Turn off ADQ before doing reinit"); + return -EOPNOTSUPP; + } + if (ice_has_vfs(pf)) { + NL_SET_ERR_MSG_MOD(extack, + "Remove all VFs before doing reinit"); + return -EOPNOTSUPP; + } + ice_unload(pf); + return 0; + case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: + return ice_devlink_reload_empr_start(pf, extack); + default: + WARN_ON(1); + return -EOPNOTSUPP; + } +} + +/** + * ice_devlink_reload_empr_finish - Wait for EMP reset to finish - @devlink: pointer to the devlink instance reloading - * @action: the action requested - * @limit: limits imposed by userspace, such as not resetting - * @actions_performed: on return, indicate what actions actually performed + * @pf: pointer to the pf instance * @extack: netlink extended ACK structure * * Wait for driver to finish rebuilding after EMP reset is completed. This @@ -443,17 +477,11 @@ ice_devlink_reload_empr_start(struct devlink *devlink,
*/ static int -ice_devlink_reload_empr_finish(struct devlink *devlink, - enum devlink_reload_action action, - enum devlink_reload_limit limit, - u32 *actions_performed, +ice_devlink_reload_empr_finish(struct ice_pf *pf, struct netlink_ext_ack *extack) { - struct ice_pf *pf = devlink_priv(devlink); int err; - *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE); - err = ice_wait_for_reset(pf, 60 * HZ); if (err) { NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute"); @@ -899,7 +927,7 @@ static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched { int status; - if (node->tx_priority >= 8) { + if (priority >= 8) { NL_SET_ERR_MSG_MOD(extack, "Priority should be less than 8"); return -EINVAL; } @@ -929,7 +957,7 @@ static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_n { int status; - if (node->tx_weight > 200 || node->tx_weight < 1) { + if (weight > 200 || weight < 1) { NL_SET_ERR_MSG_MOD(extack, "Weight must be between 1 and 200"); return -EINVAL; } @@ -1192,12 +1220,43 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate, return status; } +/** + * ice_devlink_reload_up - do reload up after reinit + * @devlink: pointer to the devlink instance reloading + * @action: the action requested + * @limit: limits imposed by userspace, such as not resetting + * @actions_performed: on return, indicate what actions actually performed + * @extack: netlink extended ACK structure + */ +static int +ice_devlink_reload_up(struct devlink *devlink, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + u32 *actions_performed, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: + *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); + return ice_load(pf); + case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: + *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE); + return ice_devlink_reload_empr_finish(pf, extack); + default: + WARN_ON(1); + return -EOPNOTSUPP; + } +} + static const struct devlink_ops ice_devlink_ops = { .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK, - .reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE), + .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | + BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE), /* The ice driver currently does not support driver reinit */ - .reload_down = ice_devlink_reload_empr_start, - .reload_up = ice_devlink_reload_empr_finish, + .reload_down = ice_devlink_reload_down, + .reload_up = ice_devlink_reload_up, .port_split = ice_devlink_port_split, .port_unsplit = ice_devlink_port_unsplit, .eswitch_mode_get = ice_eswitch_mode_get, @@ -1376,7 +1435,6 @@ void ice_devlink_register(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); - devlink_set_features(devlink, DEVLINK_F_RELOAD); devlink_register(devlink); } @@ -1411,25 +1469,9 @@ ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid) int ice_devlink_register_params(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); - union devlink_param_value value; - int err; - err = devlink_params_register(devlink, ice_devlink_params, - ARRAY_SIZE(ice_devlink_params)); - if (err) - return err; - - value.vbool = false; - devlink_param_driverinit_value_set(devlink, - DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP, - value); - - value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags) ? 
true : false; - devlink_param_driverinit_value_set(devlink, - DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, - value); - - return 0; + return devlink_params_register(devlink, ice_devlink_params, + ARRAY_SIZE(ice_devlink_params)); } void ice_devlink_unregister_params(struct ice_pf *pf) diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index f9f15acae90a..f6dd3f8fd936 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -71,17 +71,17 @@ void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf) if (!ice_is_switchdev_running(vf->pf)) return; - if (is_valid_ether_addr(vf->hw_lan_addr.addr)) { + if (is_valid_ether_addr(vf->hw_lan_addr)) { err = ice_eswitch_add_vf_mac_rule(vf->pf, vf, - vf->hw_lan_addr.addr); + vf->hw_lan_addr); if (err) { dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d\n, error %d\n", - vf->hw_lan_addr.addr, vf->vf_id, err); + vf->hw_lan_addr, vf->vf_id, err); return; } vf->num_mac++; - ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr); + ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr); } } @@ -237,7 +237,7 @@ ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi) ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); metadata_dst_free(vf->repr->dst); vf->repr->dst = NULL; - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, + ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI); netif_napi_del(&vf->repr->q_vector->napi); @@ -265,14 +265,14 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf) GFP_KERNEL); if (!vf->repr->dst) { ice_fltr_add_mac_and_broadcast(vsi, - vf->hw_lan_addr.addr, + vf->hw_lan_addr, ICE_FWD_TO_VSI); goto err; } if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) { ice_fltr_add_mac_and_broadcast(vsi, - vf->hw_lan_addr.addr, + vf->hw_lan_addr, ICE_FWD_TO_VSI); metadata_dst_free(vf->repr->dst); vf->repr->dst = NULL; @@ -281,7 +281,7 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf) if (ice_vsi_add_vlan_zero(vsi)) { ice_fltr_add_mac_and_broadcast(vsi, - vf->hw_lan_addr.addr, + vf->hw_lan_addr, ICE_FWD_TO_VSI); metadata_dst_free(vf->repr->dst); vf->repr->dst = NULL; @@ -338,7 +338,7 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi) ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); if (ret) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI); + ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI); dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf->vf_id); } @@ -425,7 +425,13 @@ static void ice_eswitch_release_env(struct ice_pf *pf) static struct ice_vsi * ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL); + struct ice_vsi_cfg_params params = {}; + + params.type = ICE_VSI_SWITCHDEV_CTRL; + params.pi = pi; + params.flags = ICE_VSI_FLAG_INIT; + + return ice_vsi_setup(pf, ¶ms); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 4191994d8f3a..b360bd8f1599 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -656,7 +656,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi) if (status) goto err_setup_rx_ring; - status = ice_vsi_cfg(vsi); + status = ice_vsi_cfg_lan(vsi); if (status) goto err_setup_rx_ring; @@ -664,7 +664,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi 
*vsi) if (status) goto err_start_rx_ring; - return status; + return 0; err_start_rx_ring: ice_vsi_free_rx_rings(vsi); @@ -1950,8 +1950,7 @@ ice_phy_type_to_ethtool(struct net_device *netdev, ICE_PHY_TYPE_LOW_100G_CAUI4 | ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | ICE_PHY_TYPE_LOW_100G_AUI4 | - ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | - ICE_PHY_TYPE_LOW_100GBASE_CP2; + ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4; phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | ICE_PHY_TYPE_HIGH_100G_CAUI2 | ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | @@ -1964,15 +1963,27 @@ ice_phy_type_to_ethtool(struct net_device *netdev, 100000baseCR4_Full); } - phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_SR4 | - ICE_PHY_TYPE_LOW_100GBASE_SR2; - if (phy_types_low & phy_type_mask_lo) { + if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseCR2_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, + 100000baseCR2_Full); + } + + if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4) { ethtool_link_ksettings_add_link_mode(ks, supported, 100000baseSR4_Full); ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, 100000baseSR4_Full); } + if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseSR2_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, + 100000baseSR2_Full); + } + phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_LR4 | ICE_PHY_TYPE_LOW_100GBASE_DR; if (phy_types_low & phy_type_mask_lo) { @@ -1984,14 +1995,20 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_KR4 | ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4; - phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4; - if (phy_types_low & phy_type_mask_lo || - phy_types_high & phy_type_mask_hi) { + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 100000baseKR4_Full); ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, 100000baseKR4_Full); } + + if (phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseKR2_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, + 100000baseKR2_Full); + } + } #define TEST_SET_BITS_TIMEOUT 50 @@ -2242,17 +2259,15 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) 100baseT_Full)) adv_link_speed |= ICE_AQ_LINK_SPEED_100MB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 1000baseX_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseX_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, 1000baseT_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 1000baseKX_Full)) adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 2500baseT_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 2500baseT_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, 2500baseX_Full)) adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, @@ -2261,9 +2276,8 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) if (ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseT_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, - 10000baseKR_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_10GB; - if 
(ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseKR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseSR_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseLR_Full)) @@ -2287,9 +2301,8 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) if (ethtool_link_ksettings_test_link_mode(ks, advertising, 50000baseCR2_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, - 50000baseKR2_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_50GB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 50000baseKR2_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, 50000baseSR2_Full)) adv_link_speed |= ICE_AQ_LINK_SPEED_50GB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, @@ -2299,7 +2312,13 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) ethtool_link_ksettings_test_link_mode(ks, advertising, 100000baseLR4_ER4_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, - 100000baseKR4_Full)) + 100000baseKR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseCR2_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseSR2_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseKR2_Full)) adv_link_speed |= ICE_AQ_LINK_SPEED_100GB; return adv_link_speed; @@ -3027,8 +3046,6 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, /* clone ring and setup updated count */ xdp_rings[i] = *vsi->xdp_rings[i]; xdp_rings[i].count = new_tx_cnt; - xdp_rings[i].next_dd = ICE_RING_QUARTER(&xdp_rings[i]) - 1; - xdp_rings[i].next_rs = ICE_RING_QUARTER(&xdp_rings[i]) - 1; xdp_rings[i].desc = NULL; xdp_rings[i].tx_buf = NULL; err = ice_setup_tx_ring(&xdp_rings[i]); @@ -3073,7 +3090,7 @@ process_rx: /* allocate Rx buffers */ err = ice_alloc_rx_bufs(&rx_rings[i], - ICE_DESC_UNUSED(&rx_rings[i])); + ICE_RX_DESC_UNUSED(&rx_rings[i])); rx_unwind: if (err) { while (i) { @@ -3641,7 +3658,9 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; int new_rx = 0, new_tx = 0; + bool locked = false; u32 curr_combined; + int ret = 0; /* do not support changing channels in Safe Mode */ if (ice_is_safe_mode(pf)) { @@ -3705,15 +3724,33 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) return -EINVAL; } - ice_vsi_recfg_qs(vsi, new_rx, new_tx); + if (pf->adev) { + mutex_lock(&pf->adev_mutex); + device_lock(&pf->adev->dev); + locked = true; + if (pf->adev->dev.driver) { + netdev_err(dev, "Cannot change channels when RDMA is active\n"); + ret = -EBUSY; + goto adev_unlock; + } + } + + ice_vsi_recfg_qs(vsi, new_rx, new_tx, locked); - if (!netif_is_rxfh_configured(dev)) - return ice_vsi_set_dflt_rss_lut(vsi, new_rx); + if (!netif_is_rxfh_configured(dev)) { + ret = ice_vsi_set_dflt_rss_lut(vsi, new_rx); + goto adev_unlock; + } /* Update rss_size due to change in Rx queues */ vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx); - return 0; +adev_unlock: + if (locked) { + device_unlock(&pf->adev->dev); + mutex_unlock(&pf->adev_mutex); + } + return ret; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 4b3bb19e1d06..5ce413965930 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -6,23 +6,6 @@ #include "ice_flow.h" #include "ice.h" -/* For 
supporting double VLAN mode, it is necessary to enable or disable certain - * boost tcam entries. The metadata labels names that match the following - * prefixes will be saved to allow enabling double VLAN mode. - */ -#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */ -#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */ - -/* To support tunneling entries by PF, the package will append the PF number to - * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. - */ -#define ICE_TNL_PRE "TNL_" -static const struct ice_tunnel_type_scan tnls[] = { - { TNL_VXLAN, "TNL_VXLAN_PF" }, - { TNL_GENEVE, "TNL_GENEVE_PF" }, - { TNL_LAST, "" } -}; - static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = { /* SWITCH */ { @@ -104,225 +87,6 @@ static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect) } /** - * ice_pkg_val_buf - * @buf: pointer to the ice buffer - * - * This helper function validates a buffer's header. - */ -static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) -{ - struct ice_buf_hdr *hdr; - u16 section_count; - u16 data_end; - - hdr = (struct ice_buf_hdr *)buf->buf; - /* verify data */ - section_count = le16_to_cpu(hdr->section_count); - if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) - return NULL; - - data_end = le16_to_cpu(hdr->data_end); - if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) - return NULL; - - return hdr; -} - -/** - * ice_find_buf_table - * @ice_seg: pointer to the ice segment - * - * Returns the address of the buffer table within the ice segment. - */ -static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) -{ - struct ice_nvm_table *nvms; - - nvms = (struct ice_nvm_table *) - (ice_seg->device_table + - le32_to_cpu(ice_seg->device_table_count)); - - return (__force struct ice_buf_table *) - (nvms->vers + le32_to_cpu(nvms->table_count)); -} - -/** - * ice_pkg_enum_buf - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * - * This function will enumerate all the buffers in the ice segment. The first - * call is made with the ice_seg parameter non-NULL; on subsequent calls, - * ice_seg is set to NULL which continues the enumeration. When the function - * returns a NULL pointer, then the end of the buffers has been reached, or an - * unexpected value has been detected (for example an invalid section count or - * an invalid buffer end value). - */ -static struct ice_buf_hdr * -ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state) -{ - if (ice_seg) { - state->buf_table = ice_find_buf_table(ice_seg); - if (!state->buf_table) - return NULL; - - state->buf_idx = 0; - return ice_pkg_val_buf(state->buf_table->buf_array); - } - - if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count)) - return ice_pkg_val_buf(state->buf_table->buf_array + - state->buf_idx); - else - return NULL; -} - -/** - * ice_pkg_advance_sect - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * - * This helper function will advance the section within the ice segment, - * also advancing the buffer if needed. 
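The enumeration helpers being removed here (they move to ice_ddp.c) implement a resumable two-level iterator: the first call passes the segment, every later call passes NULL to continue, and a NULL return means the walk is done. A self-contained model of that calling convention over plain arrays (the real code walks buffers and sections instead of ints):

#include <stdio.h>

struct pkg_enum {
	const int (*bufs)[3];	/* "buffers", each holding 3 "sections" */
	int buf_cnt;
	int buf_idx;
	int sect_idx;
};

static const int *enum_section(const int (*bufs)[3], int buf_cnt,
			       struct pkg_enum *st)
{
	if (bufs) {		/* first call: latch the table, rewind */
		st->bufs = bufs;
		st->buf_cnt = buf_cnt;
		st->buf_idx = 0;
		st->sect_idx = 0;
	} else if (++st->sect_idx >= 3) {
		st->sect_idx = 0;	/* spill into the next buffer */
		st->buf_idx++;
	}

	if (st->buf_idx >= st->buf_cnt)
		return NULL;	/* end of enumeration */

	return &st->bufs[st->buf_idx][st->sect_idx];
}

int main(void)
{
	static const int bufs[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } };
	struct pkg_enum st;
	const int *s;

	/* prints: 1 2 3 4 5 6 */
	for (s = enum_section(bufs, 2, &st); s; s = enum_section(NULL, 0, &st))
		printf("%d ", *s);
	printf("\n");
	return 0;
}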
- */ -static bool -ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) -{ - if (!ice_seg && !state->buf) - return false; - - if (!ice_seg && state->buf) - if (++state->sect_idx < le16_to_cpu(state->buf->section_count)) - return true; - - state->buf = ice_pkg_enum_buf(ice_seg, state); - if (!state->buf) - return false; - - /* start of new buffer, reset section index */ - state->sect_idx = 0; - return true; -} - -/** - * ice_pkg_enum_section - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * @sect_type: section type to enumerate - * - * This function will enumerate all the sections of a particular type in the - * ice segment. The first call is made with the ice_seg parameter non-NULL; - * on subsequent calls, ice_seg is set to NULL which continues the enumeration. - * When the function returns a NULL pointer, then the end of the matching - * sections has been reached. - */ -static void * -ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, - u32 sect_type) -{ - u16 offset, size; - - if (ice_seg) - state->type = sect_type; - - if (!ice_pkg_advance_sect(ice_seg, state)) - return NULL; - - /* scan for next matching section */ - while (state->buf->section_entry[state->sect_idx].type != - cpu_to_le32(state->type)) - if (!ice_pkg_advance_sect(NULL, state)) - return NULL; - - /* validate section */ - offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); - if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) - return NULL; - - size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size); - if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) - return NULL; - - /* make sure the section fits in the buffer */ - if (offset + size > ICE_PKG_BUF_SIZE) - return NULL; - - state->sect_type = - le32_to_cpu(state->buf->section_entry[state->sect_idx].type); - - /* calc pointer to this section */ - state->sect = ((u8 *)state->buf) + - le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); - - return state->sect; -} - -/** - * ice_pkg_enum_entry - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * @sect_type: section type to enumerate - * @offset: pointer to variable that receives the offset in the table (optional) - * @handler: function that handles access to the entries into the section type - * - * This function will enumerate all the entries in particular section type in - * the ice segment. The first call is made with the ice_seg parameter non-NULL; - * on subsequent calls, ice_seg is set to NULL which continues the enumeration. - * When the function returns a NULL pointer, then the end of the entries has - * been reached. - * - * Since each section may have a different header and entry size, the handler - * function is needed to determine the number and location entries in each - * section. - * - * The offset parameter is optional, but should be used for sections that - * contain an offset for each section table. For such cases, the section handler - * function must return the appropriate offset + index to give the absolution - * offset for each entry. For example, if the base for a section's header - * indicates a base offset of 10, and the index for the entry is 2, then - * section handler function should set the offset to 10 + 2 = 12. 
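A sketch of the handler contract just described: the callback returns the index'th entry of a section and, when asked, reports the entry's absolute offset as section base plus index (the 10 + 2 = 12 example above). The toy types are invented for illustration:

#include <stdint.h>
#include <stdio.h>

struct toy_section {
	uint32_t base;		/* base offset carried in the section header */
	uint16_t count;
	uint16_t entry[8];
};

static void *toy_handler(void *section, uint32_t index, uint32_t *offset)
{
	struct toy_section *s = section;

	if (index >= s->count)
		return NULL;	/* past the end: caller moves to next section */

	if (offset)
		*offset = s->base + index;	/* absolute table offset */

	return &s->entry[index];
}

int main(void)
{
	struct toy_section s = { .base = 10, .count = 4,
				 .entry = { 7, 8, 9, 10 } };
	uint32_t off;
	uint16_t *e = toy_handler(&s, 2, &off);

	if (e)	/* prints "entry 9 at absolute offset 12" */
		printf("entry %u at absolute offset %u\n", *e, off);
	return 0;
}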
- */ -static void * -ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, - u32 sect_type, u32 *offset, - void *(*handler)(u32 sect_type, void *section, - u32 index, u32 *offset)) -{ - void *entry; - - if (ice_seg) { - if (!handler) - return NULL; - - if (!ice_pkg_enum_section(ice_seg, state, sect_type)) - return NULL; - - state->entry_idx = 0; - state->handler = handler; - } else { - state->entry_idx++; - } - - if (!state->handler) - return NULL; - - /* get entry */ - entry = state->handler(state->sect_type, state->sect, state->entry_idx, - offset); - if (!entry) { - /* end of a section, look for another section of this type */ - if (!ice_pkg_enum_section(NULL, state, 0)) - return NULL; - - state->entry_idx = 0; - entry = state->handler(state->sect_type, state->sect, - state->entry_idx, offset); - } - - return entry; -} - -/** * ice_hw_ptype_ena - check if the PTYPE is enabled or not * @hw: pointer to the HW structure * @ptype: the hardware PTYPE @@ -333,312 +97,6 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype) test_bit(ptype, hw->hw_ptype); } -/** - * ice_marker_ptype_tcam_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the Marker PType TCAM entry to be returned - * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * Handles enumeration of individual Marker PType TCAM entries. - */ -static void * -ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index, - u32 *offset) -{ - struct ice_marker_ptype_tcam_section *marker_ptype; - - if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE) - return NULL; - - if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF) - return NULL; - - if (offset) - *offset = 0; - - marker_ptype = section; - if (index >= le16_to_cpu(marker_ptype->count)) - return NULL; - - return marker_ptype->tcam + index; -} - -/** - * ice_fill_hw_ptype - fill the enabled PTYPE bit information - * @hw: pointer to the HW structure - */ -static void ice_fill_hw_ptype(struct ice_hw *hw) -{ - struct ice_marker_ptype_tcam_entry *tcam; - struct ice_seg *seg = hw->seg; - struct ice_pkg_enum state; - - bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX); - if (!seg) - return; - - memset(&state, 0, sizeof(state)); - - do { - tcam = ice_pkg_enum_entry(seg, &state, - ICE_SID_RXPARSER_MARKER_PTYPE, NULL, - ice_marker_ptype_tcam_handler); - if (tcam && - le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX && - le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX) - set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype); - - seg = NULL; - } while (tcam); -} - -/** - * ice_boost_tcam_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the boost TCAM entry to be returned - * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * Handles enumeration of individual boost TCAM entries. 
- */ -static void * -ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) -{ - struct ice_boost_tcam_section *boost; - - if (!section) - return NULL; - - if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) - return NULL; - - /* cppcheck-suppress nullPointer */ - if (index > ICE_MAX_BST_TCAMS_IN_BUF) - return NULL; - - if (offset) - *offset = 0; - - boost = section; - if (index >= le16_to_cpu(boost->count)) - return NULL; - - return boost->tcam + index; -} - -/** - * ice_find_boost_entry - * @ice_seg: pointer to the ice segment (non-NULL) - * @addr: Boost TCAM address of entry to search for - * @entry: returns pointer to the entry - * - * Finds a particular Boost TCAM entry and returns a pointer to that entry - * if it is found. The ice_seg parameter must not be NULL since the first call - * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. - */ -static int -ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, - struct ice_boost_tcam_entry **entry) -{ - struct ice_boost_tcam_entry *tcam; - struct ice_pkg_enum state; - - memset(&state, 0, sizeof(state)); - - if (!ice_seg) - return -EINVAL; - - do { - tcam = ice_pkg_enum_entry(ice_seg, &state, - ICE_SID_RXPARSER_BOOST_TCAM, NULL, - ice_boost_tcam_handler); - if (tcam && le16_to_cpu(tcam->addr) == addr) { - *entry = tcam; - return 0; - } - - ice_seg = NULL; - } while (tcam); - - *entry = NULL; - return -EIO; -} - -/** - * ice_label_enum_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the label entry to be returned - * @offset: pointer to receive absolute offset, always zero for label sections - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * Handles enumeration of individual label entries. - */ -static void * -ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index, - u32 *offset) -{ - struct ice_label_section *labels; - - if (!section) - return NULL; - - /* cppcheck-suppress nullPointer */ - if (index > ICE_MAX_LABELS_IN_BUF) - return NULL; - - if (offset) - *offset = 0; - - labels = section; - if (index >= le16_to_cpu(labels->count)) - return NULL; - - return labels->label + index; -} - -/** - * ice_enum_labels - * @ice_seg: pointer to the ice segment (NULL on subsequent calls) - * @type: the section type that will contain the label (0 on subsequent calls) - * @state: ice_pkg_enum structure that will hold the state of the enumeration - * @value: pointer to a value that will return the label's value if found - * - * Enumerates a list of labels in the package. The caller will call - * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call - * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL - * the end of the list has been reached. 
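ice_find_boost_entry() above is the standard consumer of that enumerator: keep pulling entries until the TCAM address matches, and return -EIO when it never does. The same shape over a flat table (a simplified stand-in for the enumerator loop):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct tcam_entry { unsigned short addr; int payload; };

static int find_entry(const struct tcam_entry *tbl, size_t n,
		      unsigned short addr, const struct tcam_entry **out)
{
	size_t i;

	for (i = 0; i < n; i++) {	/* stands in for the enumerator loop */
		if (tbl[i].addr == addr) {
			*out = &tbl[i];
			return 0;
		}
	}

	*out = NULL;
	return -EIO;	/* same not-found convention as the removed helper */
}

int main(void)
{
	static const struct tcam_entry tbl[] = {
		{ 0x10, 1 }, { 0x22, 2 }, { 0x30, 3 },
	};
	const struct tcam_entry *e;

	if (!find_entry(tbl, 3, 0x22, &e))
		printf("found payload %d\n", e->payload);
	return 0;
}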
- */ -static char * -ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state, - u16 *value) -{ - struct ice_label *label; - - /* Check for valid label section on first call */ - if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) - return NULL; - - label = ice_pkg_enum_entry(ice_seg, state, type, NULL, - ice_label_enum_handler); - if (!label) - return NULL; - - *value = le16_to_cpu(label->value); - return label->name; -} - -/** - * ice_add_tunnel_hint - * @hw: pointer to the HW structure - * @label_name: label text - * @val: value of the tunnel port boost entry - */ -static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val) -{ - if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { - u16 i; - - for (i = 0; tnls[i].type != TNL_LAST; i++) { - size_t len = strlen(tnls[i].label_prefix); - - /* Look for matching label start, before continuing */ - if (strncmp(label_name, tnls[i].label_prefix, len)) - continue; - - /* Make sure this label matches our PF. Note that the PF - * character ('0' - '7') will be located where our - * prefix string's null terminator is located. - */ - if ((label_name[len] - '0') == hw->pf_id) { - hw->tnl.tbl[hw->tnl.count].type = tnls[i].type; - hw->tnl.tbl[hw->tnl.count].valid = false; - hw->tnl.tbl[hw->tnl.count].boost_addr = val; - hw->tnl.tbl[hw->tnl.count].port = 0; - hw->tnl.count++; - break; - } - } - } -} - -/** - * ice_add_dvm_hint - * @hw: pointer to the HW structure - * @val: value of the boost entry - * @enable: true if entry needs to be enabled, or false if needs to be disabled - */ -static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable) -{ - if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) { - hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val; - hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable; - hw->dvm_upd.count++; - } -} - -/** - * ice_init_pkg_hints - * @hw: pointer to the HW structure - * @ice_seg: pointer to the segment of the package scan (non-NULL) - * - * This function will scan the package and save off relevant information - * (hints or metadata) for driver use. The ice_seg parameter must not be NULL - * since the first call to ice_enum_labels requires a pointer to an actual - * ice_seg structure. 
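The tunnel-hint scan above keys off label names such as TNL_VXLAN_PF0..TNL_VXLAN_PF7, where the PF digit sits exactly where the prefix's NUL terminator would fall. A standalone version of that match:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool label_matches_pf(const char *label, const char *prefix, int pf_id)
{
	size_t len = strlen(prefix);

	if (strncmp(label, prefix, len))
		return false;	/* different label family */

	/* the PF digit '0'..'7' immediately follows the prefix */
	return label[len] - '0' == pf_id;
}

int main(void)
{
	printf("%d\n", label_matches_pf("TNL_VXLAN_PF3", "TNL_VXLAN_PF", 3)); /* 1 */
	printf("%d\n", label_matches_pf("TNL_VXLAN_PF3", "TNL_VXLAN_PF", 0)); /* 0 */
	printf("%d\n", label_matches_pf("BOOST_MAC_VLAN_DVM", "TNL_VXLAN_PF", 3)); /* 0 */
	return 0;
}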
- */ -static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) -{ - struct ice_pkg_enum state; - char *label_name; - u16 val; - int i; - - memset(&hw->tnl, 0, sizeof(hw->tnl)); - memset(&state, 0, sizeof(state)); - - if (!ice_seg) - return; - - label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, - &val); - - while (label_name) { - if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE))) - /* check for a tunnel entry */ - ice_add_tunnel_hint(hw, label_name, val); - - /* check for a dvm mode entry */ - else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE))) - ice_add_dvm_hint(hw, val, true); - - /* check for a svm mode entry */ - else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE))) - ice_add_dvm_hint(hw, val, false); - - label_name = ice_enum_labels(NULL, 0, &state, &val); - } - - /* Cache the appropriate boost TCAM entry pointers for tunnels */ - for (i = 0; i < hw->tnl.count; i++) { - ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, - &hw->tnl.tbl[i].boost_entry); - if (hw->tnl.tbl[i].boost_entry) { - hw->tnl.tbl[i].valid = true; - if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT) - hw->tnl.valid_count[hw->tnl.tbl[i].type]++; - } - } - - /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */ - for (i = 0; i < hw->dvm_upd.count; i++) - ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr, - &hw->dvm_upd.tbl[i].boost_entry); -} - /* Key creation */ #define ICE_DC_KEY 0x1 /* don't care */ @@ -810,51 +268,6 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, } /** - * ice_acquire_global_cfg_lock - * @hw: pointer to the HW structure - * @access: access type (read or write) - * - * This function will request ownership of the global config lock for reading - * or writing of the package. When attempting to obtain write access, the - * caller must check for the following two return values: - * - * 0 - Means the caller has acquired the global config lock - * and can perform writing of the package. - * -EALREADY - Indicates another driver has already written the - * package or has found that no update was necessary; in - * this case, the caller can just skip performing any - * update of the package. - */ -static int -ice_acquire_global_cfg_lock(struct ice_hw *hw, - enum ice_aq_res_access_type access) -{ - int status; - - status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, - ICE_GLOBAL_CFG_LOCK_TIMEOUT); - - if (!status) - mutex_lock(&ice_global_cfg_lock_sw); - else if (status == -EALREADY) - ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); - - return status; -} - -/** - * ice_release_global_cfg_lock - * @hw: pointer to the HW structure - * - * This function will release the global config lock. 
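The global config lock convention above makes -EALREADY a "nothing left to do" signal rather than an error: another PF already wrote (or decided not to write) the package. A stubbed, single-process model of how callers treat that return:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool pkg_written;

static int acquire_global_cfg_lock(void)
{
	if (pkg_written)
		return -EALREADY;	/* someone else finished the job */
	return 0;			/* caller now owns the lock */
}

static int download_package(void)
{
	int err = acquire_global_cfg_lock();

	if (err == -EALREADY)
		return 0;	/* skip the update, not a failure */
	if (err)
		return err;

	pkg_written = true;	/* ...download buffers here... */
	/* release the lock here in the real flow */
	return 0;
}

int main(void)
{
	printf("first:  %d\n", download_package());	/* does the work */
	printf("second: %d\n", download_package());	/* no-op success */
	return 0;
}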
- */ -static void ice_release_global_cfg_lock(struct ice_hw *hw) -{ - mutex_unlock(&ice_global_cfg_lock_sw); - ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); -} - -/** * ice_acquire_change_lock * @hw: pointer to the HW structure * @access: access type (read or write) @@ -880,1325 +293,6 @@ void ice_release_change_lock(struct ice_hw *hw) } /** - * ice_aq_download_pkg - * @hw: pointer to the hardware structure - * @pkg_buf: the package buffer to transfer - * @buf_size: the size of the package buffer - * @last_buf: last buffer indicator - * @error_offset: returns error offset - * @error_info: returns error information - * @cd: pointer to command details structure or NULL - * - * Download Package (0x0C40) - */ -static int -ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, - u16 buf_size, bool last_buf, u32 *error_offset, - u32 *error_info, struct ice_sq_cd *cd) -{ - struct ice_aqc_download_pkg *cmd; - struct ice_aq_desc desc; - int status; - - if (error_offset) - *error_offset = 0; - if (error_info) - *error_info = 0; - - cmd = &desc.params.download_pkg; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - - if (last_buf) - cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; - - status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == -EIO) { - /* Read error from buffer only when the FW returned an error */ - struct ice_aqc_download_pkg_resp *resp; - - resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; - if (error_offset) - *error_offset = le32_to_cpu(resp->error_offset); - if (error_info) - *error_info = le32_to_cpu(resp->error_info); - } - - return status; -} - -/** - * ice_aq_upload_section - * @hw: pointer to the hardware structure - * @pkg_buf: the package buffer which will receive the section - * @buf_size: the size of the package buffer - * @cd: pointer to command details structure or NULL - * - * Upload Section (0x0C41) - */ -int -ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, - u16 buf_size, struct ice_sq_cd *cd) -{ - struct ice_aq_desc desc; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - - return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); -} - -/** - * ice_aq_update_pkg - * @hw: pointer to the hardware structure - * @pkg_buf: the package cmd buffer - * @buf_size: the size of the package cmd buffer - * @last_buf: last buffer indicator - * @error_offset: returns error offset - * @error_info: returns error information - * @cd: pointer to command details structure or NULL - * - * Update Package (0x0C42) - */ -static int -ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, - bool last_buf, u32 *error_offset, u32 *error_info, - struct ice_sq_cd *cd) -{ - struct ice_aqc_download_pkg *cmd; - struct ice_aq_desc desc; - int status; - - if (error_offset) - *error_offset = 0; - if (error_info) - *error_info = 0; - - cmd = &desc.params.download_pkg; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - - if (last_buf) - cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; - - status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == -EIO) { - /* Read error from buffer only when the FW returned an error */ - struct ice_aqc_download_pkg_resp *resp; - - resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; - if (error_offset) - *error_offset = le32_to_cpu(resp->error_offset); - if (error_info) - *error_info = 
le32_to_cpu(resp->error_info); - } - - return status; -} - -/** - * ice_find_seg_in_pkg - * @hw: pointer to the hardware structure - * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) - * @pkg_hdr: pointer to the package header to be searched - * - * This function searches a package file for a particular segment type. On - * success it returns a pointer to the segment header, otherwise it will - * return NULL. - */ -static struct ice_generic_seg_hdr * -ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, - struct ice_pkg_hdr *pkg_hdr) -{ - u32 i; - - ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", - pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, - pkg_hdr->pkg_format_ver.update, - pkg_hdr->pkg_format_ver.draft); - - /* Search all package segments for the requested segment type */ - for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { - struct ice_generic_seg_hdr *seg; - - seg = (struct ice_generic_seg_hdr *) - ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i])); - - if (le32_to_cpu(seg->seg_type) == seg_type) - return seg; - } - - return NULL; -} - -/** - * ice_update_pkg_no_lock - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - */ -static int -ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ - int status = 0; - u32 i; - - for (i = 0; i < count; i++) { - struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); - bool last = ((i + 1) == count); - u32 offset, info; - - status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end), - last, &offset, &info, NULL); - - if (status) { - ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", - status, offset, info); - break; - } - } - - return status; -} - -/** - * ice_update_pkg - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - * - * Obtains change lock and updates package. - */ -static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ - int status; - - status = ice_acquire_change_lock(hw, ICE_RES_WRITE); - if (status) - return status; - - status = ice_update_pkg_no_lock(hw, bufs, count); - - ice_release_change_lock(hw); - - return status; -} - -static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err) -{ - switch (aq_err) { - case ICE_AQ_RC_ENOSEC: - case ICE_AQ_RC_EBADSIG: - return ICE_DDP_PKG_FILE_SIGNATURE_INVALID; - case ICE_AQ_RC_ESVN: - return ICE_DDP_PKG_FILE_REVISION_TOO_LOW; - case ICE_AQ_RC_EBADMAN: - case ICE_AQ_RC_EBADBUF: - return ICE_DDP_PKG_LOAD_ERROR; - default: - return ICE_DDP_PKG_ERR; - } -} - -/** - * ice_dwnld_cfg_bufs - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - * - * Obtains global config lock and downloads the package configuration buffers - * to the firmware. Metadata buffers are skipped, and the first metadata buffer - * found indicates that the rest of the buffers are all metadata buffers. 
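ice_dwnld_cfg_bufs(), whose body follows, decides "last buffer" by looking one buffer ahead: either the array ends, or the next buffer's first section carries the metadata flag (metadata buffers are never downloaded). A compact model of that lookahead:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define METADATA_BUF 0x80000000u

struct toy_buf { uint16_t section_count; uint32_t first_section_type; };

static bool is_last(const struct toy_buf *bufs, unsigned int count,
		    unsigned int i)
{
	if (i + 1 == count)
		return true;	/* end of the array */
	/* a metadata section heading the next buffer ends the download */
	return bufs[i + 1].section_count &&
	       (bufs[i + 1].first_section_type & METADATA_BUF);
}

int main(void)
{
	static const struct toy_buf bufs[] = {
		{ 1, 0x10 }, { 1, 0x12 }, { 1, 0x10 | METADATA_BUF },
	};
	unsigned int i;

	for (i = 0; i < 3; i++) {
		bool last = is_last(bufs, 3, i);

		printf("buf %u%s\n", i, last ? " (last)" : "");
		if (last)
			break;	/* trailing metadata buffers are not sent */
	}
	return 0;
}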
- */ -static enum ice_ddp_state -ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ - enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; - struct ice_buf_hdr *bh; - enum ice_aq_err err; - u32 offset, info, i; - int status; - - if (!bufs || !count) - return ICE_DDP_PKG_ERR; - - /* If the first buffer's first section has its metadata bit set - * then there are no buffers to be downloaded, and the operation is - * considered a success. - */ - bh = (struct ice_buf_hdr *)bufs; - if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) - return ICE_DDP_PKG_SUCCESS; - - status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); - if (status) { - if (status == -EALREADY) - return ICE_DDP_PKG_ALREADY_LOADED; - return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); - } - - for (i = 0; i < count; i++) { - bool last = ((i + 1) == count); - - if (!last) { - /* check next buffer for metadata flag */ - bh = (struct ice_buf_hdr *)(bufs + i + 1); - - /* A set metadata flag in the next buffer will signal - * that the current buffer will be the last buffer - * downloaded - */ - if (le16_to_cpu(bh->section_count)) - if (le32_to_cpu(bh->section_entry[0].type) & - ICE_METADATA_BUF) - last = true; - } - - bh = (struct ice_buf_hdr *)(bufs + i); - - status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, - &offset, &info, NULL); - - /* Save AQ status from download package */ - if (status) { - ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", - status, offset, info); - err = hw->adminq.sq_last_status; - state = ice_map_aq_err_to_ddp_state(err); - break; - } - - if (last) - break; - } - - if (!status) { - status = ice_set_vlan_mode(hw); - if (status) - ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", - status); - } - - ice_release_global_cfg_lock(hw); - - return state; -} - -/** - * ice_aq_get_pkg_info_list - * @hw: pointer to the hardware structure - * @pkg_info: the buffer which will receive the information list - * @buf_size: the size of the pkg_info information buffer - * @cd: pointer to command details structure or NULL - * - * Get Package Info List (0x0C43) - */ -static int -ice_aq_get_pkg_info_list(struct ice_hw *hw, - struct ice_aqc_get_pkg_info_resp *pkg_info, - u16 buf_size, struct ice_sq_cd *cd) -{ - struct ice_aq_desc desc; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); - - return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); -} - -/** - * ice_download_pkg - * @hw: pointer to the hardware structure - * @ice_seg: pointer to the segment of the package to be downloaded - * - * Handles the download of a complete package. 
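Download failures are funneled from firmware admin-queue codes into the coarser ice_ddp_state values by ice_map_aq_err_to_ddp_state() in the hunk above. A compressed model of that switch (enumerator spellings shortened; the numeric values follow the ice_ddp.h enum earlier in this diff):

#include <stdio.h>

enum aq_err { AQ_RC_OK, AQ_RC_ENOSEC, AQ_RC_EBADSIG, AQ_RC_ESVN, AQ_RC_EBADMAN };

enum ddp_state {
	DDP_PKG_FILE_SIGNATURE_INVALID = -9,
	DDP_PKG_FILE_REVISION_TOO_LOW = -10,
	DDP_PKG_LOAD_ERROR = -11,
	DDP_PKG_ERR = -12,
};

static enum ddp_state map_aq_err(enum aq_err err)
{
	switch (err) {
	case AQ_RC_ENOSEC:	/* security/signature problems fold together */
	case AQ_RC_EBADSIG:
		return DDP_PKG_FILE_SIGNATURE_INVALID;
	case AQ_RC_ESVN:
		return DDP_PKG_FILE_REVISION_TOO_LOW;
	case AQ_RC_EBADMAN:
		return DDP_PKG_LOAD_ERROR;
	default:
		return DDP_PKG_ERR;	/* anything unrecognized */
	}
}

int main(void)
{
	printf("%d\n", map_aq_err(AQ_RC_ESVN));	/* prints -10 */
	return 0;
}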
- */ -static enum ice_ddp_state -ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) -{ - struct ice_buf_table *ice_buf_tbl; - int status; - - ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", - ice_seg->hdr.seg_format_ver.major, - ice_seg->hdr.seg_format_ver.minor, - ice_seg->hdr.seg_format_ver.update, - ice_seg->hdr.seg_format_ver.draft); - - ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", - le32_to_cpu(ice_seg->hdr.seg_type), - le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); - - ice_buf_tbl = ice_find_buf_table(ice_seg); - - ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", - le32_to_cpu(ice_buf_tbl->buf_count)); - - status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, - le32_to_cpu(ice_buf_tbl->buf_count)); - - ice_post_pkg_dwnld_vlan_mode_cfg(hw); - - return status; -} - -/** - * ice_init_pkg_info - * @hw: pointer to the hardware structure - * @pkg_hdr: pointer to the driver's package hdr - * - * Saves off the package details into the HW structure. - */ -static enum ice_ddp_state -ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) -{ - struct ice_generic_seg_hdr *seg_hdr; - - if (!pkg_hdr) - return ICE_DDP_PKG_ERR; - - seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); - if (seg_hdr) { - struct ice_meta_sect *meta; - struct ice_pkg_enum state; - - memset(&state, 0, sizeof(state)); - - /* Get package information from the Metadata Section */ - meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, - ICE_SID_METADATA); - if (!meta) { - ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); - return ICE_DDP_PKG_INVALID_FILE; - } - - hw->pkg_ver = meta->ver; - memcpy(hw->pkg_name, meta->name, sizeof(meta->name)); - - ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", - meta->ver.major, meta->ver.minor, meta->ver.update, - meta->ver.draft, meta->name); - - hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; - memcpy(hw->ice_seg_id, seg_hdr->seg_id, - sizeof(hw->ice_seg_id)); - - ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", - seg_hdr->seg_format_ver.major, - seg_hdr->seg_format_ver.minor, - seg_hdr->seg_format_ver.update, - seg_hdr->seg_format_ver.draft, - seg_hdr->seg_id); - } else { - ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); - return ICE_DDP_PKG_INVALID_FILE; - } - - return ICE_DDP_PKG_SUCCESS; -} - -/** - * ice_get_pkg_info - * @hw: pointer to the hardware structure - * - * Store details of the package currently loaded in HW into the HW structure. 
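ice_chk_pkg_version(), later in this hunk, gates the package on an exact major/minor match with the supported version (1.3 per ICE_PKG_SUPP_VER_MAJ/MNR in ice_ddp.h above): anything above the window is TOO_HIGH, anything below is TOO_LOW. The same check as a standalone function:

#include <stdio.h>

#define SUPP_MAJ 1
#define SUPP_MNR 3

enum ver_state { VER_OK = 0, VER_TOO_HIGH = -7, VER_TOO_LOW = -8 };

static enum ver_state chk_pkg_version(int major, int minor)
{
	if (major > SUPP_MAJ || (major == SUPP_MAJ && minor > SUPP_MNR))
		return VER_TOO_HIGH;
	if (major < SUPP_MAJ || (major == SUPP_MAJ && minor < SUPP_MNR))
		return VER_TOO_LOW;
	return VER_OK;	/* major.minor matches exactly */
}

int main(void)
{
	/* prints: 0 -7 -8 */
	printf("%d %d %d\n", chk_pkg_version(1, 3),
	       chk_pkg_version(1, 4), chk_pkg_version(0, 9));
	return 0;
}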
- */ -static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) -{ - enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; - struct ice_aqc_get_pkg_info_resp *pkg_info; - u16 size; - u32 i; - - size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT); - pkg_info = kzalloc(size, GFP_KERNEL); - if (!pkg_info) - return ICE_DDP_PKG_ERR; - - if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) { - state = ICE_DDP_PKG_ERR; - goto init_pkg_free_alloc; - } - - for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { -#define ICE_PKG_FLAG_COUNT 4 - char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; - u8 place = 0; - - if (pkg_info->pkg_info[i].is_active) { - flags[place++] = 'A'; - hw->active_pkg_ver = pkg_info->pkg_info[i].ver; - hw->active_track_id = - le32_to_cpu(pkg_info->pkg_info[i].track_id); - memcpy(hw->active_pkg_name, - pkg_info->pkg_info[i].name, - sizeof(pkg_info->pkg_info[i].name)); - hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; - } - if (pkg_info->pkg_info[i].is_active_at_boot) - flags[place++] = 'B'; - if (pkg_info->pkg_info[i].is_modified) - flags[place++] = 'M'; - if (pkg_info->pkg_info[i].is_in_nvm) - flags[place++] = 'N'; - - ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", - i, pkg_info->pkg_info[i].ver.major, - pkg_info->pkg_info[i].ver.minor, - pkg_info->pkg_info[i].ver.update, - pkg_info->pkg_info[i].ver.draft, - pkg_info->pkg_info[i].name, flags); - } - -init_pkg_free_alloc: - kfree(pkg_info); - - return state; -} - -/** - * ice_verify_pkg - verify package - * @pkg: pointer to the package buffer - * @len: size of the package buffer - * - * Verifies various attributes of the package file, including length, format - * version, and the requirement of at least one segment. - */ -static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) -{ - u32 seg_count; - u32 i; - - if (len < struct_size(pkg, seg_offset, 1)) - return ICE_DDP_PKG_INVALID_FILE; - - if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || - pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || - pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || - pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) - return ICE_DDP_PKG_INVALID_FILE; - - /* pkg must have at least one segment */ - seg_count = le32_to_cpu(pkg->seg_count); - if (seg_count < 1) - return ICE_DDP_PKG_INVALID_FILE; - - /* make sure segment array fits in package length */ - if (len < struct_size(pkg, seg_offset, seg_count)) - return ICE_DDP_PKG_INVALID_FILE; - - /* all segments must fit within length */ - for (i = 0; i < seg_count; i++) { - u32 off = le32_to_cpu(pkg->seg_offset[i]); - struct ice_generic_seg_hdr *seg; - - /* segment header must fit */ - if (len < off + sizeof(*seg)) - return ICE_DDP_PKG_INVALID_FILE; - - seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); - - /* segment body must fit */ - if (len < off + le32_to_cpu(seg->seg_size)) - return ICE_DDP_PKG_INVALID_FILE; - } - - return ICE_DDP_PKG_SUCCESS; -} - -/** - * ice_free_seg - free package segment pointer - * @hw: pointer to the hardware structure - * - * Frees the package segment pointer in the proper manner, depending on if the - * segment was allocated or just the passed in pointer was stored. 
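ice_verify_pkg() above layers its bounds checks: the fixed header must fit, then the seg_offset array, then each segment's header and, finally, each segment's full body. A userspace model of the same validation (structs simplified, endianness and overflow handling omitted):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct seg_hdr { uint32_t seg_type; uint32_t seg_size; };
struct pkg_hdr { uint32_t seg_count; uint32_t seg_offset[]; };

static int verify_pkg(const uint8_t *pkg, uint32_t len)
{
	const struct pkg_hdr *hdr = (const void *)pkg;
	uint32_t i, count;

	if (len < sizeof(*hdr) + sizeof(uint32_t))	/* room for one offset */
		return -1;

	count = hdr->seg_count;
	if (count < 1 || len < sizeof(*hdr) + count * sizeof(uint32_t))
		return -1;	/* offset array must fit, >= 1 segment */

	for (i = 0; i < count; i++) {
		uint32_t off = hdr->seg_offset[i];
		struct seg_hdr seg;

		if (len < off + sizeof(seg))	/* segment header must fit */
			return -1;
		memcpy(&seg, pkg + off, sizeof(seg));
		if (len < off + seg.seg_size)	/* segment body must fit */
			return -1;
	}
	return 0;
}

int main(void)
{
	static uint32_t buf[16];	/* 64 bytes, suitably aligned */
	uint8_t *p = (uint8_t *)buf;
	struct pkg_hdr *hdr = (void *)p;
	struct seg_hdr seg = { 0x10, 16 };

	hdr->seg_count = 1;
	hdr->seg_offset[0] = 16;
	memcpy(p + 16, &seg, sizeof(seg));

	/* prints "ok=0 truncated=-1" */
	printf("ok=%d truncated=%d\n", verify_pkg(p, 64), verify_pkg(p, 20));
	return 0;
}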
- */ -void ice_free_seg(struct ice_hw *hw) -{ - if (hw->pkg_copy) { - devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy); - hw->pkg_copy = NULL; - hw->pkg_size = 0; - } - hw->seg = NULL; -} - -/** - * ice_init_pkg_regs - initialize additional package registers - * @hw: pointer to the hardware structure - */ -static void ice_init_pkg_regs(struct ice_hw *hw) -{ -#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF -#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF -#define ICE_SW_BLK_IDX 0 - - /* setup Switch block input mask, which is 48-bits in two parts */ - wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); - wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); -} - -/** - * ice_chk_pkg_version - check package version for compatibility with driver - * @pkg_ver: pointer to a version structure to check - * - * Check to make sure that the package about to be downloaded is compatible with - * the driver. To be compatible, the major and minor components of the package - * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR - * definitions. - */ -static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) -{ - if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ || - (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && - pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) - return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH; - else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ || - (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && - pkg_ver->minor < ICE_PKG_SUPP_VER_MNR)) - return ICE_DDP_PKG_FILE_VERSION_TOO_LOW; - - return ICE_DDP_PKG_SUCCESS; -} - -/** - * ice_chk_pkg_compat - * @hw: pointer to the hardware structure - * @ospkg: pointer to the package hdr - * @seg: pointer to the package segment hdr - * - * This function checks the package version compatibility with driver and NVM - */ -static enum ice_ddp_state -ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, - struct ice_seg **seg) -{ - struct ice_aqc_get_pkg_info_resp *pkg; - enum ice_ddp_state state; - u16 size; - u32 i; - - /* Check package version compatibility */ - state = ice_chk_pkg_version(&hw->pkg_ver); - if (state) { - ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); - return state; - } - - /* find ICE segment in given package */ - *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, - ospkg); - if (!*seg) { - ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); - return ICE_DDP_PKG_INVALID_FILE; - } - - /* Check if FW is compatible with the OS package */ - size = struct_size(pkg, pkg_info, ICE_PKG_CNT); - pkg = kzalloc(size, GFP_KERNEL); - if (!pkg) - return ICE_DDP_PKG_ERR; - - if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) { - state = ICE_DDP_PKG_LOAD_ERROR; - goto fw_ddp_compat_free_alloc; - } - - for (i = 0; i < le32_to_cpu(pkg->count); i++) { - /* loop till we find the NVM package */ - if (!pkg->pkg_info[i].is_in_nvm) - continue; - if ((*seg)->hdr.seg_format_ver.major != - pkg->pkg_info[i].ver.major || - (*seg)->hdr.seg_format_ver.minor > - pkg->pkg_info[i].ver.minor) { - state = ICE_DDP_PKG_FW_MISMATCH; - ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); - } - /* done processing NVM package so break */ - break; - } -fw_ddp_compat_free_alloc: - kfree(pkg); - return state; -} - -/** - * ice_sw_fv_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the field vector entry to be returned - * @offset: ptr to variable that receives the offset in the field vector table - * - * This is a callback function that can be passed to 
ice_pkg_enum_entry. - * This function treats the given section as of type ice_sw_fv_section and - * enumerates offset field. "offset" is an index into the field vector table. - */ -static void * -ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) -{ - struct ice_sw_fv_section *fv_section = section; - - if (!section || sect_type != ICE_SID_FLD_VEC_SW) - return NULL; - if (index >= le16_to_cpu(fv_section->count)) - return NULL; - if (offset) - /* "index" passed in to this function is relative to a given - * 4k block. To get to the true index into the field vector - * table need to add the relative index to the base_offset - * field of this section - */ - *offset = le16_to_cpu(fv_section->base_offset) + index; - return fv_section->fv + index; -} - -/** - * ice_get_prof_index_max - get the max profile index for used profile - * @hw: pointer to the HW struct - * - * Calling this function will get the max profile index for used profile - * and store the index number in struct ice_switch_info *switch_info - * in HW for following use. - */ -static int ice_get_prof_index_max(struct ice_hw *hw) -{ - u16 prof_index = 0, j, max_prof_index = 0; - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - bool flag = false; - struct ice_fv *fv; - u32 offset; - - memset(&state, 0, sizeof(state)); - - if (!hw->seg) - return -EINVAL; - - ice_seg = hw->seg; - - do { - fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &offset, ice_sw_fv_handler); - if (!fv) - break; - ice_seg = NULL; - - /* in the profile that not be used, the prot_id is set to 0xff - * and the off is set to 0x1ff for all the field vectors. - */ - for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) - if (fv->ew[j].prot_id != ICE_PROT_INVALID || - fv->ew[j].off != ICE_FV_OFFSET_INVAL) - flag = true; - if (flag && prof_index > max_prof_index) - max_prof_index = prof_index; - - prof_index++; - flag = false; - } while (fv); - - hw->switch_info->max_used_prof_index = max_prof_index; - - return 0; -} - -/** - * ice_get_ddp_pkg_state - get DDP pkg state after download - * @hw: pointer to the HW struct - * @already_loaded: indicates if pkg was already loaded onto the device - */ -static enum ice_ddp_state -ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded) -{ - if (hw->pkg_ver.major == hw->active_pkg_ver.major && - hw->pkg_ver.minor == hw->active_pkg_ver.minor && - hw->pkg_ver.update == hw->active_pkg_ver.update && - hw->pkg_ver.draft == hw->active_pkg_ver.draft && - !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) { - if (already_loaded) - return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED; - else - return ICE_DDP_PKG_SUCCESS; - } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || - hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { - return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED; - } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && - hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { - return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED; - } else { - return ICE_DDP_PKG_ERR; - } -} - -/** - * ice_init_pkg - initialize/download package - * @hw: pointer to the hardware structure - * @buf: pointer to the package buffer - * @len: size of the package buffer - * - * This function initializes a package. The package contains HW tables - * required to do packet processing. First, the function extracts package - * information such as version. 
Then it finds the ice configuration segment - * within the package; this function then saves a copy of the segment pointer - * within the supplied package buffer. Next, the function will cache any hints - * from the package, followed by downloading the package itself. Note, that if - * a previous PF driver has already downloaded the package successfully, then - * the current driver will not have to download the package again. - * - * The local package contents will be used to query default behavior and to - * update specific sections of the HW's version of the package (e.g. to update - * the parse graph to understand new protocols). - * - * This function stores a pointer to the package buffer memory, and it is - * expected that the supplied buffer will not be freed immediately. If the - * package buffer needs to be freed, such as when read from a file, use - * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this - * case. - */ -enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) -{ - bool already_loaded = false; - enum ice_ddp_state state; - struct ice_pkg_hdr *pkg; - struct ice_seg *seg; - - if (!buf || !len) - return ICE_DDP_PKG_ERR; - - pkg = (struct ice_pkg_hdr *)buf; - state = ice_verify_pkg(pkg, len); - if (state) { - ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", - state); - return state; - } - - /* initialize package info */ - state = ice_init_pkg_info(hw, pkg); - if (state) - return state; - - /* before downloading the package, check package version for - * compatibility with driver - */ - state = ice_chk_pkg_compat(hw, pkg, &seg); - if (state) - return state; - - /* initialize package hints and then download package */ - ice_init_pkg_hints(hw, seg); - state = ice_download_pkg(hw, seg); - if (state == ICE_DDP_PKG_ALREADY_LOADED) { - ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); - already_loaded = true; - } - - /* Get information on the package currently loaded in HW, then make sure - * the driver is compatible with this version. - */ - if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) { - state = ice_get_pkg_info(hw); - if (!state) - state = ice_get_ddp_pkg_state(hw, already_loaded); - } - - if (ice_is_init_pkg_successful(state)) { - hw->seg = seg; - /* on successful package download update other required - * registers to support the package and fill HW tables - * with package content. - */ - ice_init_pkg_regs(hw); - ice_fill_blk_tbls(hw); - ice_fill_hw_ptype(hw); - ice_get_prof_index_max(hw); - } else { - ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", - state); - } - - return state; -} - -/** - * ice_copy_and_init_pkg - initialize/download a copy of the package - * @hw: pointer to the hardware structure - * @buf: pointer to the package buffer - * @len: size of the package buffer - * - * This function copies the package buffer, and then calls ice_init_pkg() to - * initialize the copied package contents. - * - * The copying is necessary if the package buffer supplied is constant, or if - * the memory may disappear shortly after calling this function. - * - * If the package buffer resides in the data segment and can be modified, the - * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). - * - * However, if the package buffer needs to be copied first, such as when being - * read from a file, the caller should use ice_copy_and_init_pkg(). - * - * This function will first copy the package buffer, before calling - * ice_init_pkg(). 
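
/* A typical call site for the copying variant (a sketch, not part of
 * this patch; 'dev' and 'hw' stand in for the surrounding driver
 * context and the DDP file name is illustrative): firmware returned by
 * request_firmware() is const and released immediately, which is
 * exactly the case the copy exists for.
 */
const struct firmware *fw;

if (!request_firmware(&fw, "intel/ice/ddp/ice.pkg", dev)) {
	enum ice_ddp_state state;

	state = ice_copy_and_init_pkg(hw, fw->data, fw->size);
	release_firmware(fw);	/* safe: hw now owns its own copy */

	if (!ice_is_init_pkg_successful(state))
		dev_err(dev, "DDP package init failed: %d\n", state);
}
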
The caller is free to immediately destroy the original - * package buffer, as the new copy will be managed by this function and - * related routines. - */ -enum ice_ddp_state -ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) -{ - enum ice_ddp_state state; - u8 *buf_copy; - - if (!buf || !len) - return ICE_DDP_PKG_ERR; - - buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); - - state = ice_init_pkg(hw, buf_copy, len); - if (!ice_is_init_pkg_successful(state)) { - /* Free the copy, since we failed to initialize the package */ - devm_kfree(ice_hw_to_dev(hw), buf_copy); - } else { - /* Track the copied pkg so we can free it later */ - hw->pkg_copy = buf_copy; - hw->pkg_size = len; - } - - return state; -} - -/** - * ice_is_init_pkg_successful - check if DDP init was successful - * @state: state of the DDP pkg after download - */ -bool ice_is_init_pkg_successful(enum ice_ddp_state state) -{ - switch (state) { - case ICE_DDP_PKG_SUCCESS: - case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: - case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: - return true; - default: - return false; - } -} - -/** - * ice_pkg_buf_alloc - * @hw: pointer to the HW structure - * - * Allocates a package buffer and returns a pointer to the buffer header. - * Note: all package contents must be in Little Endian form. - */ -static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) -{ - struct ice_buf_build *bld; - struct ice_buf_hdr *buf; - - bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL); - if (!bld) - return NULL; - - buf = (struct ice_buf_hdr *)bld; - buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr, - section_entry)); - return bld; -} - -static bool ice_is_gtp_u_profile(u16 prof_idx) -{ - return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID && - prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) || - prof_idx == ICE_PROFID_IPV4_GTPU_TEID; -} - -static bool ice_is_gtp_c_profile(u16 prof_idx) -{ - switch (prof_idx) { - case ICE_PROFID_IPV4_GTPC_TEID: - case ICE_PROFID_IPV4_GTPC_NO_TEID: - case ICE_PROFID_IPV6_GTPC_TEID: - case ICE_PROFID_IPV6_GTPC_NO_TEID: - return true; - default: - return false; - } -} - -/** - * ice_get_sw_prof_type - determine switch profile type - * @hw: pointer to the HW structure - * @fv: pointer to the switch field vector - * @prof_idx: profile index to check - */ -static enum ice_prof_type -ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx) -{ - u16 i; - - if (ice_is_gtp_c_profile(prof_idx)) - return ICE_PROF_TUN_GTPC; - - if (ice_is_gtp_u_profile(prof_idx)) - return ICE_PROF_TUN_GTPU; - - for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { - /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ - if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && - fv->ew[i].off == ICE_VNI_OFFSET) - return ICE_PROF_TUN_UDP; - - /* GRE tunnel will have GRE protocol */ - if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) - return ICE_PROF_TUN_GRE; - } - - return ICE_PROF_NON_TUN; -} - -/** - * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type - * @hw: pointer to hardware structure - * @req_profs: type of profiles requested - * @bm: pointer to memory for returning the bitmap of field vectors - */ -void -ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, - unsigned long *bm) -{ - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - struct ice_fv *fv; - - if (req_profs == ICE_PROF_ALL) { - bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES); - return; - } - - memset(&state, 0, sizeof(state)); - bitmap_zero(bm, 
ICE_MAX_NUM_PROFILES); - ice_seg = hw->seg; - do { - enum ice_prof_type prof_type; - u32 offset; - - fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &offset, ice_sw_fv_handler); - ice_seg = NULL; - - if (fv) { - /* Determine field vector type */ - prof_type = ice_get_sw_prof_type(hw, fv, offset); - - if (req_profs & prof_type) - set_bit((u16)offset, bm); - } - } while (fv); -} - -/** - * ice_get_sw_fv_list - * @hw: pointer to the HW structure - * @lkups: list of protocol types - * @bm: bitmap of field vectors to consider - * @fv_list: Head of a list - * - * Finds all the field vector entries from switch block that contain - * a given protocol ID and offset and returns a list of structures of type - * "ice_sw_fv_list_entry". Every structure in the list has a field vector - * definition and profile ID information - * NOTE: The caller of the function is responsible for freeing the memory - * allocated for every list entry. - */ -int -ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups, - unsigned long *bm, struct list_head *fv_list) -{ - struct ice_sw_fv_list_entry *fvl; - struct ice_sw_fv_list_entry *tmp; - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - struct ice_fv *fv; - u32 offset; - - memset(&state, 0, sizeof(state)); - - if (!lkups->n_val_words || !hw->seg) - return -EINVAL; - - ice_seg = hw->seg; - do { - u16 i; - - fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &offset, ice_sw_fv_handler); - if (!fv) - break; - ice_seg = NULL; - - /* If field vector is not in the bitmap list, then skip this - * profile. - */ - if (!test_bit((u16)offset, bm)) - continue; - - for (i = 0; i < lkups->n_val_words; i++) { - int j; - - for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) - if (fv->ew[j].prot_id == - lkups->fv_words[i].prot_id && - fv->ew[j].off == lkups->fv_words[i].off) - break; - if (j >= hw->blk[ICE_BLK_SW].es.fvw) - break; - if (i + 1 == lkups->n_val_words) { - fvl = devm_kzalloc(ice_hw_to_dev(hw), - sizeof(*fvl), GFP_KERNEL); - if (!fvl) - goto err; - fvl->fv_ptr = fv; - fvl->profile_id = offset; - list_add(&fvl->list_entry, fv_list); - break; - } - } - } while (fv); - if (list_empty(fv_list)) { - dev_warn(ice_hw_to_dev(hw), "Required profiles not found in currently loaded DDP package"); - return -EIO; - } - - return 0; - -err: - list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) { - list_del(&fvl->list_entry); - devm_kfree(ice_hw_to_dev(hw), fvl); - } - - return -ENOMEM; -} - -/** - * ice_init_prof_result_bm - Initialize the profile result index bitmap - * @hw: pointer to hardware structure - */ -void ice_init_prof_result_bm(struct ice_hw *hw) -{ - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - struct ice_fv *fv; - - memset(&state, 0, sizeof(state)); - - if (!hw->seg) - return; - - ice_seg = hw->seg; - do { - u32 off; - u16 i; - - fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &off, ice_sw_fv_handler); - ice_seg = NULL; - if (!fv) - break; - - bitmap_zero(hw->switch_info->prof_res_bm[off], - ICE_MAX_FV_WORDS); - - /* Determine empty field vector indices, these can be - * used for recipe results. Skip index 0, since it is - * always used for Switch ID. 
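
/* Note the enumeration idiom shared by the walkers above: the segment
 * pointer seeds ice_pkg_enum_entry() on the first call only and is then
 * set to NULL, since the ice_pkg_enum state carries the cursor across
 * subsequent calls. On the caller side, the bitmap interface composes
 * with the kernel bitmap helpers (sketch; helper name and the
 * assumption that ice_prof_type values may be OR-ed as flags are
 * illustrative):
 */
static bool prof_is_tunnel(struct ice_hw *hw, u16 prof_id)
{
	DECLARE_BITMAP(profs, ICE_MAX_NUM_PROFILES);

	ice_get_sw_fv_bitmap(hw, ICE_PROF_TUN_UDP | ICE_PROF_TUN_GRE, profs);
	return test_bit(prof_id, profs);
}
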
- */ - for (i = 1; i < ICE_MAX_FV_WORDS; i++) - if (fv->ew[i].prot_id == ICE_PROT_INVALID && - fv->ew[i].off == ICE_FV_OFFSET_INVAL) - set_bit(i, hw->switch_info->prof_res_bm[off]); - } while (fv); -} - -/** - * ice_pkg_buf_free - * @hw: pointer to the HW structure - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Frees a package buffer - */ -void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) -{ - devm_kfree(ice_hw_to_dev(hw), bld); -} - -/** - * ice_pkg_buf_reserve_section - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * @count: the number of sections to reserve - * - * Reserves one or more section table entries in a package buffer. This routine - * can be called multiple times as long as they are made before calling - * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() - * is called once, the number of sections that can be allocated will not be able - * to be increased; not using all reserved sections is fine, but this will - * result in some wasted space in the buffer. - * Note: all package contents must be in Little Endian form. - */ -static int -ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) -{ - struct ice_buf_hdr *buf; - u16 section_count; - u16 data_end; - - if (!bld) - return -EINVAL; - - buf = (struct ice_buf_hdr *)&bld->buf; - - /* already an active section, can't increase table size */ - section_count = le16_to_cpu(buf->section_count); - if (section_count > 0) - return -EIO; - - if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) - return -EIO; - bld->reserved_section_table_entries += count; - - data_end = le16_to_cpu(buf->data_end) + - flex_array_size(buf, section_entry, count); - buf->data_end = cpu_to_le16(data_end); - - return 0; -} - -/** - * ice_pkg_buf_alloc_section - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * @type: the section type value - * @size: the size of the section to reserve (in bytes) - * - * Reserves memory in the buffer for a section's content and updates the - * buffers' status accordingly. This routine returns a pointer to the first - * byte of the section start within the buffer, which is used to fill in the - * section contents. - * Note: all package contents must be in Little Endian form. 
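
/* The reserve-before-alloc constraint described above, in practice:
 * every section table entry is reserved first, then each section's
 * payload is carved out. A two-section sketch (section sizes sz1/sz2
 * are illustrative):
 */
struct ice_buf_build *bld;

bld = ice_pkg_buf_alloc(hw);
if (bld && !ice_pkg_buf_reserve_section(bld, 2)) {
	void *xlt1 = ice_pkg_buf_alloc_section(bld, ICE_SID_XLT1_SW, sz1);
	void *xlt2 = ice_pkg_buf_alloc_section(bld, ICE_SID_XLT2_SW, sz2);

	/* ... fill xlt1/xlt2 with little-endian section payloads,
	 * submit the buffer, then release it ...
	 */
	ice_pkg_buf_free(hw, bld);
}
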
- */ -static void * -ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) -{ - struct ice_buf_hdr *buf; - u16 sect_count; - u16 data_end; - - if (!bld || !type || !size) - return NULL; - - buf = (struct ice_buf_hdr *)&bld->buf; - - /* check for enough space left in buffer */ - data_end = le16_to_cpu(buf->data_end); - - /* section start must align on 4 byte boundary */ - data_end = ALIGN(data_end, 4); - - if ((data_end + size) > ICE_MAX_S_DATA_END) - return NULL; - - /* check for more available section table entries */ - sect_count = le16_to_cpu(buf->section_count); - if (sect_count < bld->reserved_section_table_entries) { - void *section_ptr = ((u8 *)buf) + data_end; - - buf->section_entry[sect_count].offset = cpu_to_le16(data_end); - buf->section_entry[sect_count].size = cpu_to_le16(size); - buf->section_entry[sect_count].type = cpu_to_le32(type); - - data_end += size; - buf->data_end = cpu_to_le16(data_end); - - buf->section_count = cpu_to_le16(sect_count + 1); - return section_ptr; - } - - /* no free section table entries */ - return NULL; -} - -/** - * ice_pkg_buf_alloc_single_section - * @hw: pointer to the HW structure - * @type: the section type value - * @size: the size of the section to reserve (in bytes) - * @section: returns pointer to the section - * - * Allocates a package buffer with a single section. - * Note: all package contents must be in Little Endian form. - */ -struct ice_buf_build * -ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, - void **section) -{ - struct ice_buf_build *buf; - - if (!section) - return NULL; - - buf = ice_pkg_buf_alloc(hw); - if (!buf) - return NULL; - - if (ice_pkg_buf_reserve_section(buf, 1)) - goto ice_pkg_buf_alloc_single_section_err; - - *section = ice_pkg_buf_alloc_section(buf, type, size); - if (!*section) - goto ice_pkg_buf_alloc_single_section_err; - - return buf; - -ice_pkg_buf_alloc_single_section_err: - ice_pkg_buf_free(hw, buf); - return NULL; -} - -/** - * ice_pkg_buf_get_active_sections - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Returns the number of active sections. Before using the package buffer - * in an update package command, the caller should make sure that there is at - * least one active section - otherwise, the buffer is not legal and should - * not be used. - * Note: all package contents must be in Little Endian form. 
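
/* As the note above says, a build is only legal for an update command
 * once at least one section is active; callers typically guard on the
 * count before handing the raw buffer to firmware (sketch;
 * submit_update() is an illustrative stand-in for the real
 * update-package AQ call):
 */
if (ice_pkg_buf_get_active_sections(bld))
	err = submit_update(hw, ice_pkg_buf(bld));
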
- */ -static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) -{ - struct ice_buf_hdr *buf; - - if (!bld) - return 0; - - buf = (struct ice_buf_hdr *)&bld->buf; - return le16_to_cpu(buf->section_count); -} - -/** - * ice_pkg_buf - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Return a pointer to the buffer's header - */ -struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) -{ - if (!bld) - return NULL; - - return &bld->buf; -} - -/** * ice_get_open_tunnel_port - retrieve an open tunnel port * @hw: pointer to the HW structure * @port: returns open port @@ -2297,10 +391,11 @@ ice_upd_dvm_boost_entry_err: */ int ice_set_dvm_boost_entries(struct ice_hw *hw) { - int status; u16 i; for (i = 0; i < hw->dvm_upd.count; i++) { + int status; + status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]); if (status) return status; @@ -2757,7 +852,6 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2) count++; list_for_each_entry(tmp2, list2, list) chk_count++; - /* cppcheck-suppress knownConditionTrueFalse */ if (!count || count != chk_count) return false; @@ -5102,12 +3196,13 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_vsi *vsi_cur; struct ice_vsig_prof *d, *t; - int status; /* remove TCAM entries */ list_for_each_entry_safe(d, t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, list) { + int status; + status = ice_rem_prof_id(hw, blk, d); if (status) return status; @@ -5158,12 +3253,13 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_prof *p, *t; - int status; list_for_each_entry_safe(p, t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, list) if (p->profile_cookie == hdl) { + int status; + if (ice_vsig_prof_id_count(hw, blk, vsig) == 1) /* this is the last profile, remove the VSIG */ return ice_rem_vsig(hw, blk, vsig, chg); diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 9c530c86703e..7af7c8e9aa4e 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -6,75 +6,6 @@ #include "ice_type.h" -/* Package minimal version supported */ -#define ICE_PKG_SUPP_VER_MAJ 1 -#define ICE_PKG_SUPP_VER_MNR 3 - -/* Package format version */ -#define ICE_PKG_FMT_VER_MAJ 1 -#define ICE_PKG_FMT_VER_MNR 0 -#define ICE_PKG_FMT_VER_UPD 0 -#define ICE_PKG_FMT_VER_DFT 0 - -#define ICE_PKG_CNT 4 - -enum ice_ddp_state { - /* Indicates that this call to ice_init_pkg - * successfully loaded the requested DDP package - */ - ICE_DDP_PKG_SUCCESS = 0, - - /* Generic error for already loaded errors, it is mapped later to - * the more specific one (one of the next 3) - */ - ICE_DDP_PKG_ALREADY_LOADED = -1, - - /* Indicates that a DDP package of the same version has already been - * loaded onto the device by a previous call or by another PF - */ - ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2, - - /* The device has a DDP package that is not supported by the driver */ - ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3, - - /* The device has a compatible package - * (but different from the request) already loaded - */ - ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4, - - /* The firmware loaded on the device is not compatible with - * the DDP package loaded - */ - ICE_DDP_PKG_FW_MISMATCH = -5, - - /* The DDP package file is invalid */ - ICE_DDP_PKG_INVALID_FILE = -6, - - /* The version of the DDP package provided is higher 
than - * the driver supports - */ - ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7, - - /* The version of the DDP package provided is lower than the - * driver supports - */ - ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8, - - /* The signature of the DDP package file provided is invalid */ - ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9, - - /* The DDP package file security revision is too low and not - * supported by firmware - */ - ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10, - - /* An error occurred in firmware while loading the DDP package */ - ICE_DDP_PKG_LOAD_ERROR = -11, - - /* Other errors */ - ICE_DDP_PKG_ERR = -12 -}; - int ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); void ice_release_change_lock(struct ice_hw *hw); diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index 974d14a83b2e..4f42e14ed3ae 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -3,205 +3,7 @@ #ifndef _ICE_FLEX_TYPE_H_ #define _ICE_FLEX_TYPE_H_ - -#define ICE_FV_OFFSET_INVAL 0x1FF - -/* Extraction Sequence (Field Vector) Table */ -struct ice_fv_word { - u8 prot_id; - u16 off; /* Offset within the protocol header */ - u8 resvrd; -} __packed; - -#define ICE_MAX_NUM_PROFILES 256 - -#define ICE_MAX_FV_WORDS 48 -struct ice_fv { - struct ice_fv_word ew[ICE_MAX_FV_WORDS]; -}; - -/* Package and segment headers and tables */ -struct ice_pkg_hdr { - struct ice_pkg_ver pkg_format_ver; - __le32 seg_count; - __le32 seg_offset[]; -}; - -/* generic segment */ -struct ice_generic_seg_hdr { -#define SEGMENT_TYPE_METADATA 0x00000001 -#define SEGMENT_TYPE_ICE 0x00000010 - __le32 seg_type; - struct ice_pkg_ver seg_format_ver; - __le32 seg_size; - char seg_id[ICE_PKG_NAME_SIZE]; -}; - -/* ice specific segment */ - -union ice_device_id { - struct { - __le16 device_id; - __le16 vendor_id; - } dev_vend_id; - __le32 id; -}; - -struct ice_device_id_entry { - union ice_device_id device; - union ice_device_id sub_device; -}; - -struct ice_seg { - struct ice_generic_seg_hdr hdr; - __le32 device_table_count; - struct ice_device_id_entry device_table[]; -}; - -struct ice_nvm_table { - __le32 table_count; - __le32 vers[]; -}; - -struct ice_buf { -#define ICE_PKG_BUF_SIZE 4096 - u8 buf[ICE_PKG_BUF_SIZE]; -}; - -struct ice_buf_table { - __le32 buf_count; - struct ice_buf buf_array[]; -}; - -/* global metadata specific segment */ -struct ice_global_metadata_seg { - struct ice_generic_seg_hdr hdr; - struct ice_pkg_ver pkg_ver; - __le32 rsvd; - char pkg_name[ICE_PKG_NAME_SIZE]; -}; - -#define ICE_MIN_S_OFF 12 -#define ICE_MAX_S_OFF 4095 -#define ICE_MIN_S_SZ 1 -#define ICE_MAX_S_SZ 4084 - -/* section information */ -struct ice_section_entry { - __le32 type; - __le16 offset; - __le16 size; -}; - -#define ICE_MIN_S_COUNT 1 -#define ICE_MAX_S_COUNT 511 -#define ICE_MIN_S_DATA_END 12 -#define ICE_MAX_S_DATA_END 4096 - -#define ICE_METADATA_BUF 0x80000000 - -struct ice_buf_hdr { - __le16 section_count; - __le16 data_end; - struct ice_section_entry section_entry[]; -}; - -#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \ - struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\ - (ent_sz)) - -/* ice package section IDs */ -#define ICE_SID_METADATA 1 -#define ICE_SID_XLT0_SW 10 -#define ICE_SID_XLT_KEY_BUILDER_SW 11 -#define ICE_SID_XLT1_SW 12 -#define ICE_SID_XLT2_SW 13 -#define ICE_SID_PROFID_TCAM_SW 14 -#define ICE_SID_PROFID_REDIR_SW 15 -#define ICE_SID_FLD_VEC_SW 16 -#define 
ICE_SID_CDID_KEY_BUILDER_SW 17 - -struct ice_meta_sect { - struct ice_pkg_ver ver; -#define ICE_META_SECT_NAME_SIZE 28 - char name[ICE_META_SECT_NAME_SIZE]; - __le32 track_id; -}; - -#define ICE_SID_CDID_REDIR_SW 18 - -#define ICE_SID_XLT0_ACL 20 -#define ICE_SID_XLT_KEY_BUILDER_ACL 21 -#define ICE_SID_XLT1_ACL 22 -#define ICE_SID_XLT2_ACL 23 -#define ICE_SID_PROFID_TCAM_ACL 24 -#define ICE_SID_PROFID_REDIR_ACL 25 -#define ICE_SID_FLD_VEC_ACL 26 -#define ICE_SID_CDID_KEY_BUILDER_ACL 27 -#define ICE_SID_CDID_REDIR_ACL 28 - -#define ICE_SID_XLT0_FD 30 -#define ICE_SID_XLT_KEY_BUILDER_FD 31 -#define ICE_SID_XLT1_FD 32 -#define ICE_SID_XLT2_FD 33 -#define ICE_SID_PROFID_TCAM_FD 34 -#define ICE_SID_PROFID_REDIR_FD 35 -#define ICE_SID_FLD_VEC_FD 36 -#define ICE_SID_CDID_KEY_BUILDER_FD 37 -#define ICE_SID_CDID_REDIR_FD 38 - -#define ICE_SID_XLT0_RSS 40 -#define ICE_SID_XLT_KEY_BUILDER_RSS 41 -#define ICE_SID_XLT1_RSS 42 -#define ICE_SID_XLT2_RSS 43 -#define ICE_SID_PROFID_TCAM_RSS 44 -#define ICE_SID_PROFID_REDIR_RSS 45 -#define ICE_SID_FLD_VEC_RSS 46 -#define ICE_SID_CDID_KEY_BUILDER_RSS 47 -#define ICE_SID_CDID_REDIR_RSS 48 - -#define ICE_SID_RXPARSER_MARKER_PTYPE 55 -#define ICE_SID_RXPARSER_BOOST_TCAM 56 -#define ICE_SID_RXPARSER_METADATA_INIT 58 -#define ICE_SID_TXPARSER_BOOST_TCAM 66 - -#define ICE_SID_XLT0_PE 80 -#define ICE_SID_XLT_KEY_BUILDER_PE 81 -#define ICE_SID_XLT1_PE 82 -#define ICE_SID_XLT2_PE 83 -#define ICE_SID_PROFID_TCAM_PE 84 -#define ICE_SID_PROFID_REDIR_PE 85 -#define ICE_SID_FLD_VEC_PE 86 -#define ICE_SID_CDID_KEY_BUILDER_PE 87 -#define ICE_SID_CDID_REDIR_PE 88 - -/* Label Metadata section IDs */ -#define ICE_SID_LBL_FIRST 0x80000010 -#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 -/* The following define MUST be updated to reflect the last label section ID */ -#define ICE_SID_LBL_LAST 0x80000038 - -enum ice_block { - ICE_BLK_SW = 0, - ICE_BLK_ACL, - ICE_BLK_FD, - ICE_BLK_RSS, - ICE_BLK_PE, - ICE_BLK_COUNT -}; - -enum ice_sect { - ICE_XLT0 = 0, - ICE_XLT_KB, - ICE_XLT1, - ICE_XLT2, - ICE_PROF_TCAM, - ICE_PROF_REDIR, - ICE_VEC_TBL, - ICE_CDID_KB, - ICE_CDID_REDIR, - ICE_SECT_COUNT -}; +#include "ice_ddp.h" /* Packet Type (PTYPE) values */ #define ICE_PTYPE_MAC_PAY 1 @@ -283,134 +85,6 @@ struct ice_ptype_attributes { enum ice_ptype_attrib_type attrib; }; -/* package labels */ -struct ice_label { - __le16 value; -#define ICE_PKG_LABEL_SIZE 64 - char name[ICE_PKG_LABEL_SIZE]; -}; - -struct ice_label_section { - __le16 count; - struct ice_label label[]; -}; - -#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ - struct_size((struct ice_label_section *)0, label, 1) - \ - sizeof(struct ice_label), sizeof(struct ice_label)) - -struct ice_sw_fv_section { - __le16 count; - __le16 base_offset; - struct ice_fv fv[]; -}; - -struct ice_sw_fv_list_entry { - struct list_head list_entry; - u32 profile_id; - struct ice_fv *fv_ptr; -}; - -/* The BOOST TCAM stores the match packet header in reverse order, meaning - * the fields are reversed; in addition, this means that the normally big endian - * fields of the packet are now little endian. 
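
/* A concrete consequence of the byte-reversed TCAM layout described
 * above: a UDP destination port that is big endian on the wire ends up
 * little endian in the key (see hv_dst_port_key below). Illustration
 * with the GTP-U port, 2152 == 0x0868 (helper name is illustrative):
 */
static __le16 boost_key_dst_port(u16 port)
{
	return cpu_to_le16(port);	/* e.g. 2152 for GTP-U */
}
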
- */ -struct ice_boost_key_value { -#define ICE_BOOST_REMAINING_HV_KEY 15 - u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY]; - __le16 hv_dst_port_key; - __le16 hv_src_port_key; - u8 tcam_search_key; -} __packed; - -struct ice_boost_key { - struct ice_boost_key_value key; - struct ice_boost_key_value key2; -}; - -/* package Boost TCAM entry */ -struct ice_boost_tcam_entry { - __le16 addr; - __le16 reserved; - /* break up the 40 bytes of key into different fields */ - struct ice_boost_key key; - u8 boost_hit_index_group; - /* The following contains bitfields which are not on byte boundaries. - * These fields are currently unused by driver software. - */ -#define ICE_BOOST_BIT_FIELDS 43 - u8 bit_fields[ICE_BOOST_BIT_FIELDS]; -}; - -struct ice_boost_tcam_section { - __le16 count; - __le16 reserved; - struct ice_boost_tcam_entry tcam[]; -}; - -#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ - struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \ - sizeof(struct ice_boost_tcam_entry), \ - sizeof(struct ice_boost_tcam_entry)) - -/* package Marker Ptype TCAM entry */ -struct ice_marker_ptype_tcam_entry { -#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024 - __le16 addr; - __le16 ptype; - u8 keys[20]; -}; - -struct ice_marker_ptype_tcam_section { - __le16 count; - __le16 reserved; - struct ice_marker_ptype_tcam_entry tcam[]; -}; - -#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \ - ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \ - sizeof(struct ice_marker_ptype_tcam_entry), \ - sizeof(struct ice_marker_ptype_tcam_entry)) - -struct ice_xlt1_section { - __le16 count; - __le16 offset; - u8 value[]; -}; - -struct ice_xlt2_section { - __le16 count; - __le16 offset; - __le16 value[]; -}; - -struct ice_prof_redir_section { - __le16 count; - __le16 offset; - u8 redir_value[]; -}; - -/* package buffer building */ - -struct ice_buf_build { - struct ice_buf buf; - u16 reserved_section_table_entries; -}; - -struct ice_pkg_enum { - struct ice_buf_table *buf_table; - u32 buf_idx; - - u32 type; - struct ice_buf_hdr *buf; - u32 sect_idx; - void *sect; - u32 sect_type; - - u32 entry_idx; - void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); -}; - /* Tunnel enabling */ enum ice_tunnel_type { diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c index 40e678cfb507..aff7a141c30d 100644 --- a/drivers/net/ethernet/intel/ice/ice_fltr.c +++ b/drivers/net/ethernet/intel/ice/ice_fltr.c @@ -208,6 +208,11 @@ static int ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list) void ice_fltr_remove_all(struct ice_vsi *vsi) { ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx); + /* sync netdev filters if exist */ + if (vsi->netdev) { + __dev_uc_unsync(vsi->netdev, NULL); + __dev_mc_unsync(vsi->netdev, NULL); + } } /** diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c index b5a7f246d230..8dec748bb53a 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.c +++ b/drivers/net/ethernet/intel/ice/ice_gnss.c @@ -3,15 +3,18 @@ #include "ice.h" #include "ice_lib.h" -#include <linux/tty_driver.h> /** - * ice_gnss_do_write - Write data to internal GNSS + * ice_gnss_do_write - Write data to internal GNSS receiver * @pf: board private structure * @buf: command buffer * @size: command buffer size * * Write UBX command data to the GNSS receiver + * + * Return: + * * number of bytes written - success + * * negative - error code */ static unsigned int ice_gnss_do_write(struct 
ice_pf *pf, unsigned char *buf, unsigned int size) @@ -82,6 +85,12 @@ static void ice_gnss_write_pending(struct kthread_work *work) write_work); struct ice_pf *pf = gnss->back; + if (!pf) + return; + + if (!test_bit(ICE_FLAG_GNSS, pf->flags)) + return; + if (!list_empty(&gnss->queue)) { struct gnss_write_buf *write_buf = NULL; unsigned int bytes; @@ -102,16 +111,14 @@ static void ice_gnss_write_pending(struct kthread_work *work) * ice_gnss_read - Read data from internal GNSS module * @work: GNSS read work structure * - * Read the data from internal GNSS receiver, number of bytes read will be - * returned in *read_data parameter. + * Read the data from internal GNSS receiver, write it to gnss_dev. */ static void ice_gnss_read(struct kthread_work *work) { struct gnss_serial *gnss = container_of(work, struct gnss_serial, read_work.work); + unsigned int i, bytes_read, data_len, count; struct ice_aqc_link_topo_addr link_topo; - unsigned int i, bytes_read, data_len; - struct tty_port *port; struct ice_pf *pf; struct ice_hw *hw; __be16 data_len_b; @@ -120,14 +127,15 @@ static void ice_gnss_read(struct kthread_work *work) int err = 0; pf = gnss->back; - if (!pf || !gnss->tty || !gnss->tty->port) { + if (!pf) { err = -EFAULT; goto exit; } - hw = &pf->hw; - port = gnss->tty->port; + if (!test_bit(ICE_FLAG_GNSS, pf->flags)) + return; + hw = &pf->hw; buf = (char *)get_zeroed_page(GFP_KERNEL); if (!buf) { err = -ENOMEM; @@ -159,7 +167,6 @@ static void ice_gnss_read(struct kthread_work *work) } data_len = min_t(typeof(data_len), data_len, PAGE_SIZE); - data_len = tty_buffer_request_room(port, data_len); if (!data_len) { err = -ENOMEM; goto exit_buf; @@ -179,12 +186,11 @@ static void ice_gnss_read(struct kthread_work *work) goto exit_buf; } - /* Send the data to the tty layer for users to read. This doesn't - * actually push the data through unless tty->low_latency is set. - */ - tty_insert_flip_string(port, buf, i); - tty_flip_buffer_push(port); - + count = gnss_insert_raw(pf->gnss_dev, buf, i); + if (count != i) + dev_warn(ice_pf_to_dev(pf), + "gnss_insert_raw ret=%d size=%d\n", + count, i); exit_buf: free_page((unsigned long)buf); kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, @@ -195,11 +201,16 @@ exit: } /** - * ice_gnss_struct_init - Initialize GNSS structure for the TTY + * ice_gnss_struct_init - Initialize GNSS receiver * @pf: Board private structure - * @index: TTY device index + * + * Initialize GNSS structures and workers. + * + * Return: + * * pointer to initialized gnss_serial struct - success + * * NULL - error */ -static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index) +static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct kthread_worker *kworker; @@ -209,17 +220,12 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index) if (!gnss) return NULL; - mutex_init(&gnss->gnss_mutex); - gnss->open_count = 0; gnss->back = pf; - pf->gnss_serial[index] = gnss; + pf->gnss_serial = gnss; kthread_init_delayed_work(&gnss->read_work, ice_gnss_read); INIT_LIST_HEAD(&gnss->queue); kthread_init_work(&gnss->write_work, ice_gnss_write_pending); - /* Allocate a kworker for handling work required for the GNSS TTY - * writes. 
- */ kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev)); if (IS_ERR(kworker)) { kfree(gnss); @@ -232,139 +238,100 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index) } /** - * ice_gnss_tty_open - Initialize GNSS structures on TTY device open - * @tty: pointer to the tty_struct - * @filp: pointer to the file + * ice_gnss_open - Open GNSS device + * @gdev: pointer to the gnss device struct * - * This routine is mandatory. If this routine is not filled in, the attempted - * open will fail with ENODEV. + * Open GNSS device and start filling the read buffer for consumer. + * + * Return: + * * 0 - success + * * negative - error code */ -static int ice_gnss_tty_open(struct tty_struct *tty, struct file *filp) +static int ice_gnss_open(struct gnss_device *gdev) { + struct ice_pf *pf = gnss_get_drvdata(gdev); struct gnss_serial *gnss; - struct ice_pf *pf; - pf = (struct ice_pf *)tty->driver->driver_state; if (!pf) return -EFAULT; - /* Clear the pointer in case something fails */ - tty->driver_data = NULL; - - /* Get the serial object associated with this tty pointer */ - gnss = pf->gnss_serial[tty->index]; - if (!gnss) { - /* Initialize GNSS struct on the first device open */ - gnss = ice_gnss_struct_init(pf, tty->index); - if (!gnss) - return -ENOMEM; - } + if (!test_bit(ICE_FLAG_GNSS, pf->flags)) + return -EFAULT; - mutex_lock(&gnss->gnss_mutex); + gnss = pf->gnss_serial; + if (!gnss) + return -ENODEV; - /* Save our structure within the tty structure */ - tty->driver_data = gnss; - gnss->tty = tty; - gnss->open_count++; kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, 0); - mutex_unlock(&gnss->gnss_mutex); - return 0; } /** - * ice_gnss_tty_close - Cleanup GNSS structures on tty device close - * @tty: pointer to the tty_struct - * @filp: pointer to the file + * ice_gnss_close - Close GNSS device + * @gdev: pointer to the gnss device struct + * + * Close GNSS device, cancel worker, stop filling the read buffer. */ -static void ice_gnss_tty_close(struct tty_struct *tty, struct file *filp) +static void ice_gnss_close(struct gnss_device *gdev) { - struct gnss_serial *gnss = tty->driver_data; - struct ice_pf *pf; - - if (!gnss) - return; + struct ice_pf *pf = gnss_get_drvdata(gdev); + struct gnss_serial *gnss; - pf = (struct ice_pf *)tty->driver->driver_state; if (!pf) return; - mutex_lock(&gnss->gnss_mutex); - - if (!gnss->open_count) { - /* Port was never opened */ - dev_err(ice_pf_to_dev(pf), "GNSS port not opened\n"); - goto exit; - } + gnss = pf->gnss_serial; + if (!gnss) + return; - gnss->open_count--; - if (gnss->open_count <= 0) { - /* Port is in shutdown state */ - kthread_cancel_delayed_work_sync(&gnss->read_work); - } -exit: - mutex_unlock(&gnss->gnss_mutex); + kthread_cancel_work_sync(&gnss->write_work); + kthread_cancel_delayed_work_sync(&gnss->read_work); } /** - * ice_gnss_tty_write - Write GNSS data - * @tty: pointer to the tty_struct + * ice_gnss_write - Write to GNSS device + * @gdev: pointer to the gnss device struct * @buf: pointer to the user data - * @count: the number of characters queued to be sent to the HW + * @count: size of the buffer to be sent to the GNSS device * - * The write function call is called by the user when there is data to be sent - * to the hardware. First the tty core receives the call, and then it passes the - * data on to the tty driver's write function. The tty core also tells the tty - * driver the size of the data being sent. 
- * If any errors happen during the write call, a negative error value should be - * returned instead of the number of characters queued to be written. + * Return: + * * number of written bytes - success + * * negative - error code */ static int -ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) +ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf, + size_t count) { + struct ice_pf *pf = gnss_get_drvdata(gdev); struct gnss_write_buf *write_buf; struct gnss_serial *gnss; unsigned char *cmd_buf; - struct ice_pf *pf; int err = count; /* We cannot write a single byte using our I2C implementation. */ if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF) return -EINVAL; - gnss = tty->driver_data; - if (!gnss) - return -EFAULT; - - pf = (struct ice_pf *)tty->driver->driver_state; if (!pf) return -EFAULT; - /* Only allow to write on TTY 0 */ - if (gnss != pf->gnss_serial[0]) - return -EIO; - - mutex_lock(&gnss->gnss_mutex); + if (!test_bit(ICE_FLAG_GNSS, pf->flags)) + return -EFAULT; - if (!gnss->open_count) { - err = -EINVAL; - goto exit; - } + gnss = pf->gnss_serial; + if (!gnss) + return -ENODEV; cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL); - if (!cmd_buf) { - err = -ENOMEM; - goto exit; - } + if (!cmd_buf) + return -ENOMEM; memcpy(cmd_buf, buf, count); - - /* Send the data out to a hardware port */ write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL); if (!write_buf) { - err = -ENOMEM; - goto exit; + kfree(cmd_buf); + return -ENOMEM; } write_buf->buf = cmd_buf; @@ -372,136 +339,89 @@ ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) INIT_LIST_HEAD(&write_buf->queue); list_add_tail(&write_buf->queue, &gnss->queue); kthread_queue_work(gnss->kworker, &gnss->write_work); -exit: - mutex_unlock(&gnss->gnss_mutex); + return err; } +static const struct gnss_operations ice_gnss_ops = { + .open = ice_gnss_open, + .close = ice_gnss_close, + .write_raw = ice_gnss_write, +}; + /** - * ice_gnss_tty_write_room - Returns the numbers of characters to be written. - * @tty: pointer to the tty_struct + * ice_gnss_register - Register GNSS receiver + * @pf: Board private structure + * + * Allocate and register GNSS receiver in the Linux GNSS subsystem. * - * This routine returns the numbers of characters the tty driver will accept - * for queuing to be written or 0 if either the TTY is not open or user - * tries to write to the TTY other than the first. 
+ * Return: + * * 0 - success + * * negative - error code */ -static unsigned int ice_gnss_tty_write_room(struct tty_struct *tty) +static int ice_gnss_register(struct ice_pf *pf) { - struct gnss_serial *gnss = tty->driver_data; - - /* Only allow to write on TTY 0 */ - if (!gnss || gnss != gnss->back->gnss_serial[0]) - return 0; - - mutex_lock(&gnss->gnss_mutex); + struct gnss_device *gdev; + int ret; + + gdev = gnss_allocate_device(ice_pf_to_dev(pf)); + if (!gdev) { + dev_err(ice_pf_to_dev(pf), + "gnss_allocate_device returns NULL\n"); + return -ENOMEM; + } - if (!gnss->open_count) { - mutex_unlock(&gnss->gnss_mutex); - return 0; + gdev->ops = &ice_gnss_ops; + gdev->type = GNSS_TYPE_UBX; + gnss_set_drvdata(gdev, pf); + ret = gnss_register_device(gdev); + if (ret) { + dev_err(ice_pf_to_dev(pf), "gnss_register_device err=%d\n", + ret); + gnss_put_device(gdev); + } else { + pf->gnss_dev = gdev; } - mutex_unlock(&gnss->gnss_mutex); - return ICE_GNSS_TTY_WRITE_BUF; + return ret; } -static const struct tty_operations tty_gps_ops = { - .open = ice_gnss_tty_open, - .close = ice_gnss_tty_close, - .write = ice_gnss_tty_write, - .write_room = ice_gnss_tty_write_room, -}; - /** - * ice_gnss_create_tty_driver - Create a TTY driver for GNSS + * ice_gnss_deregister - Deregister GNSS receiver * @pf: Board private structure + * + * Deregister GNSS receiver from the Linux GNSS subsystem, + * release its resources. */ -static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf) +static void ice_gnss_deregister(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); - const int ICE_TTYDRV_NAME_MAX = 14; - struct tty_driver *tty_driver; - char *ttydrv_name; - unsigned int i; - int err; - - tty_driver = tty_alloc_driver(ICE_GNSS_TTY_MINOR_DEVICES, - TTY_DRIVER_REAL_RAW); - if (IS_ERR(tty_driver)) { - dev_err(dev, "Failed to allocate memory for GNSS TTY\n"); - return NULL; - } - - ttydrv_name = kzalloc(ICE_TTYDRV_NAME_MAX, GFP_KERNEL); - if (!ttydrv_name) { - tty_driver_kref_put(tty_driver); - return NULL; - } - - snprintf(ttydrv_name, ICE_TTYDRV_NAME_MAX, "ttyGNSS_%02x%02x_", - (u8)pf->pdev->bus->number, (u8)PCI_SLOT(pf->pdev->devfn)); - - /* Initialize the tty driver*/ - tty_driver->owner = THIS_MODULE; - tty_driver->driver_name = dev_driver_string(dev); - tty_driver->name = (const char *)ttydrv_name; - tty_driver->type = TTY_DRIVER_TYPE_SERIAL; - tty_driver->subtype = SERIAL_TYPE_NORMAL; - tty_driver->init_termios = tty_std_termios; - tty_driver->init_termios.c_iflag &= ~INLCR; - tty_driver->init_termios.c_iflag |= IGNCR; - tty_driver->init_termios.c_oflag &= ~OPOST; - tty_driver->init_termios.c_lflag &= ~ICANON; - tty_driver->init_termios.c_cflag &= ~(CSIZE | CBAUD | CBAUDEX); - /* baud rate 9600 */ - tty_termios_encode_baud_rate(&tty_driver->init_termios, 9600, 9600); - tty_driver->driver_state = pf; - tty_set_operations(tty_driver, &tty_gps_ops); - - for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) { - pf->gnss_tty_port[i] = kzalloc(sizeof(*pf->gnss_tty_port[i]), - GFP_KERNEL); - pf->gnss_serial[i] = NULL; - - tty_port_init(pf->gnss_tty_port[i]); - tty_port_link_device(pf->gnss_tty_port[i], tty_driver, i); - } - - err = tty_register_driver(tty_driver); - if (err) { - dev_err(dev, "Failed to register TTY driver err=%d\n", err); - - for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) { - tty_port_destroy(pf->gnss_tty_port[i]); - kfree(pf->gnss_tty_port[i]); - } - kfree(ttydrv_name); - tty_driver_kref_put(pf->ice_gnss_tty_driver); - - return NULL; + if (pf->gnss_dev) { + 
gnss_deregister_device(pf->gnss_dev); + gnss_put_device(pf->gnss_dev); + pf->gnss_dev = NULL; } - - for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) - dev_info(dev, "%s%d registered\n", ttydrv_name, i); - - return tty_driver; } /** - * ice_gnss_init - Initialize GNSS TTY support + * ice_gnss_init - Initialize GNSS support * @pf: Board private structure */ void ice_gnss_init(struct ice_pf *pf) { - struct tty_driver *tty_driver; + int ret; - tty_driver = ice_gnss_create_tty_driver(pf); - if (!tty_driver) + pf->gnss_serial = ice_gnss_struct_init(pf); + if (!pf->gnss_serial) return; - pf->ice_gnss_tty_driver = tty_driver; - - set_bit(ICE_FLAG_GNSS, pf->flags); - dev_info(ice_pf_to_dev(pf), "GNSS TTY init successful\n"); + ret = ice_gnss_register(pf); + if (!ret) { + set_bit(ICE_FLAG_GNSS, pf->flags); + dev_info(ice_pf_to_dev(pf), "GNSS init successful\n"); + } else { + ice_gnss_exit(pf); + dev_err(ice_pf_to_dev(pf), "GNSS init failure\n"); + } } /** @@ -510,31 +430,20 @@ void ice_gnss_init(struct ice_pf *pf) */ void ice_gnss_exit(struct ice_pf *pf) { - unsigned int i; + ice_gnss_deregister(pf); + clear_bit(ICE_FLAG_GNSS, pf->flags); - if (!test_bit(ICE_FLAG_GNSS, pf->flags) || !pf->ice_gnss_tty_driver) - return; - - for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) { - if (pf->gnss_tty_port[i]) { - tty_port_destroy(pf->gnss_tty_port[i]); - kfree(pf->gnss_tty_port[i]); - } + if (pf->gnss_serial) { + struct gnss_serial *gnss = pf->gnss_serial; - if (pf->gnss_serial[i]) { - struct gnss_serial *gnss = pf->gnss_serial[i]; + kthread_cancel_work_sync(&gnss->write_work); + kthread_cancel_delayed_work_sync(&gnss->read_work); + kthread_destroy_worker(gnss->kworker); + gnss->kworker = NULL; - kthread_cancel_work_sync(&gnss->write_work); - kthread_cancel_delayed_work_sync(&gnss->read_work); - kfree(gnss); - pf->gnss_serial[i] = NULL; - } + kfree(gnss); + pf->gnss_serial = NULL; } - - tty_unregister_driver(pf->ice_gnss_tty_driver); - kfree(pf->ice_gnss_tty_driver->name); - tty_driver_kref_put(pf->ice_gnss_tty_driver); - pf->ice_gnss_tty_driver = NULL; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h index f454dd1d9285..4d49e5b0b4b8 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.h +++ b/drivers/net/ethernet/intel/ice/ice_gnss.h @@ -4,15 +4,8 @@ #ifndef _ICE_GNSS_H_ #define _ICE_GNSS_H_ -#include <linux/tty.h> -#include <linux/tty_flip.h> - #define ICE_E810T_GNSS_I2C_BUS 0x2 #define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */ -/* Create 2 minor devices, both using the same GNSS module. First one is RW, - * second one RO. 
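
/* Once gnss_register_device() above succeeds, the gnss core exposes the
 * receiver as a character device (typically /dev/gnss0; the index
 * depends on the system) carrying the raw receiver stream. A minimal
 * userspace consumer (sketch; userspace C, needs <fcntl.h> and
 * <unistd.h>):
 */
static ssize_t read_gnss(char *buf, size_t len)
{
	int fd = open("/dev/gnss0", O_RDWR);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = read(fd, buf, len);	/* raw UBX data from the receiver */
	close(fd);
	return n;
}
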
- */ -#define ICE_GNSS_TTY_MINOR_DEVICES 2 #define ICE_GNSS_TTY_WRITE_BUF 250 #define ICE_MAX_I2C_DATA_SIZE FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M) #define ICE_MAX_I2C_WRITE_BYTES 4 @@ -36,13 +29,9 @@ struct gnss_write_buf { unsigned char *buf; }; - /** * struct gnss_serial - data used to initialize GNSS TTY port * @back: back pointer to PF - * @tty: pointer to the tty for this device - * @open_count: number of times this port has been opened - * @gnss_mutex: gnss_mutex used to protect GNSS serial operations * @kworker: kwork thread for handling periodic work * @read_work: read_work function for handling GNSS reads * @write_work: write_work function for handling GNSS writes @@ -50,16 +39,13 @@ struct gnss_write_buf { */ struct gnss_serial { struct ice_pf *back; - struct tty_struct *tty; - int open_count; - struct mutex gnss_mutex; /* protects GNSS serial structure */ struct kthread_worker *kworker; struct kthread_delayed_work read_work; struct kthread_work write_work; struct list_head queue; }; -#if IS_ENABLED(CONFIG_TTY) +#if IS_ENABLED(CONFIG_GNSS) void ice_gnss_init(struct ice_pf *pf); void ice_gnss_exit(struct ice_pf *pf); bool ice_gnss_is_gps_present(struct ice_hw *hw); @@ -70,5 +56,5 @@ static inline bool ice_gnss_is_gps_present(struct ice_hw *hw) { return false; } -#endif /* IS_ENABLED(CONFIG_TTY) */ +#endif /* IS_ENABLED(CONFIG_GNSS) */ #endif /* _ICE_GNSS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index 895c32bcc8b5..e6bc2285071e 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -6,6 +6,8 @@ #include "ice_lib.h" #include "ice_dcb_lib.h" +static DEFINE_XARRAY_ALLOC1(ice_aux_id); + /** * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct * @pf: pointer to PF struct @@ -246,6 +248,17 @@ static int ice_reserve_rdma_qvector(struct ice_pf *pf) } /** + * ice_free_rdma_qvector - free vector resources reserved for RDMA driver + * @pf: board private structure to initialize + */ +static void ice_free_rdma_qvector(struct ice_pf *pf) +{ + pf->num_avail_sw_msix -= pf->num_rdma_msix; + ice_free_res(pf->irq_tracker, pf->rdma_base_vector, + ICE_RES_RDMA_VEC_ID); +} + +/** * ice_adev_release - function to be mapped to AUX dev's release op * @dev: pointer to device to free */ @@ -331,12 +344,48 @@ int ice_init_rdma(struct ice_pf *pf) struct device *dev = &pf->pdev->dev; int ret; + if (!ice_is_rdma_ena(pf)) { + dev_warn(dev, "RDMA is not supported on this device\n"); + return 0; + } + + ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX), + GFP_KERNEL); + if (ret) { + dev_err(dev, "Failed to allocate device ID for AUX driver\n"); + return -ENOMEM; + } + /* Reserve vector resources */ ret = ice_reserve_rdma_qvector(pf); if (ret < 0) { dev_err(dev, "failed to reserve vectors for RDMA\n"); - return ret; + goto err_reserve_rdma_qvector; } pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2; - return ice_plug_aux_dev(pf); + ret = ice_plug_aux_dev(pf); + if (ret) + goto err_plug_aux_dev; + return 0; + +err_plug_aux_dev: + ice_free_rdma_qvector(pf); +err_reserve_rdma_qvector: + pf->adev = NULL; + xa_erase(&ice_aux_id, pf->aux_idx); + return ret; +} + +/** + * ice_deinit_rdma - deinitialize RDMA on PF + * @pf: ptr to ice_pf + */ +void ice_deinit_rdma(struct ice_pf *pf) +{ + if (!ice_is_rdma_ena(pf)) + return; + + ice_unplug_aux_dev(pf); + ice_free_rdma_qvector(pf); + xa_erase(&ice_aux_id, pf->aux_idx); } diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c 
b/drivers/net/ethernet/intel/ice/ice_lib.c index 94aa834cd9a6..781475480ff2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -166,14 +166,14 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) /** * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI * @vsi: the VSI being configured - * @vf: the VF associated with this VSI, if any * * Return 0 on success and a negative value on error */ -static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf) +static void ice_vsi_set_num_qs(struct ice_vsi *vsi) { enum ice_vsi_type vsi_type = vsi->type; struct ice_pf *pf = vsi->back; + struct ice_vf *vf = vsi->vf; if (WARN_ON(vsi_type == ICE_VSI_VF && !vf)) return; @@ -282,10 +282,10 @@ static int ice_get_free_slot(void *array, int size, int curr) } /** - * ice_vsi_delete - delete a VSI from the switch + * ice_vsi_delete_from_hw - delete a VSI from the switch * @vsi: pointer to VSI being removed */ -void ice_vsi_delete(struct ice_vsi *vsi) +static void ice_vsi_delete_from_hw(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct ice_vsi_ctx *ctxt; @@ -348,47 +348,144 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi) } /** - * ice_vsi_clear - clean up and deallocate the provided VSI + * ice_vsi_free_stats - Free the ring statistics structures + * @vsi: VSI pointer + */ +static void ice_vsi_free_stats(struct ice_vsi *vsi) +{ + struct ice_vsi_stats *vsi_stat; + struct ice_pf *pf = vsi->back; + int i; + + if (vsi->type == ICE_VSI_CHNL) + return; + if (!pf->vsi_stats) + return; + + vsi_stat = pf->vsi_stats[vsi->idx]; + if (!vsi_stat) + return; + + ice_for_each_alloc_txq(vsi, i) { + if (vsi_stat->tx_ring_stats[i]) { + kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); + WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); + } + } + + ice_for_each_alloc_rxq(vsi, i) { + if (vsi_stat->rx_ring_stats[i]) { + kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); + WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); + } + } + + kfree(vsi_stat->tx_ring_stats); + kfree(vsi_stat->rx_ring_stats); + kfree(vsi_stat); + pf->vsi_stats[vsi->idx] = NULL; +} + +/** + * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI + * @vsi: VSI which is having stats allocated + */ +static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi) +{ + struct ice_ring_stats **tx_ring_stats; + struct ice_ring_stats **rx_ring_stats; + struct ice_vsi_stats *vsi_stats; + struct ice_pf *pf = vsi->back; + u16 i; + + vsi_stats = pf->vsi_stats[vsi->idx]; + tx_ring_stats = vsi_stats->tx_ring_stats; + rx_ring_stats = vsi_stats->rx_ring_stats; + + /* Allocate Tx ring stats */ + ice_for_each_alloc_txq(vsi, i) { + struct ice_ring_stats *ring_stats; + struct ice_tx_ring *ring; + + ring = vsi->tx_rings[i]; + ring_stats = tx_ring_stats[i]; + + if (!ring_stats) { + ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); + if (!ring_stats) + goto err_out; + + WRITE_ONCE(tx_ring_stats[i], ring_stats); + } + + ring->ring_stats = ring_stats; + } + + /* Allocate Rx ring stats */ + ice_for_each_alloc_rxq(vsi, i) { + struct ice_ring_stats *ring_stats; + struct ice_rx_ring *ring; + + ring = vsi->rx_rings[i]; + ring_stats = rx_ring_stats[i]; + + if (!ring_stats) { + ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); + if (!ring_stats) + goto err_out; + + WRITE_ONCE(rx_ring_stats[i], ring_stats); + } + + ring->ring_stats = ring_stats; + } + + return 0; + +err_out: + ice_vsi_free_stats(vsi); + return -ENOMEM; +} + +/** + * ice_vsi_free - clean up and deallocate the provided VSI 
* @vsi: pointer to VSI being cleared * * This deallocates the VSI's queue resources, removes it from the PF's * VSI array if necessary, and deallocates the VSI - * - * Returns 0 on success, negative on failure */ -int ice_vsi_clear(struct ice_vsi *vsi) +static void ice_vsi_free(struct ice_vsi *vsi) { struct ice_pf *pf = NULL; struct device *dev; - if (!vsi) - return 0; - - if (!vsi->back) - return -EINVAL; + if (!vsi || !vsi->back) + return; pf = vsi->back; dev = ice_pf_to_dev(pf); if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx); - return -EINVAL; + return; } mutex_lock(&pf->sw_mutex); /* updates the PF for this cleared VSI */ pf->vsi[vsi->idx] = NULL; - if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL) - pf->next_vsi = vsi->idx; - if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && vsi->vf) - pf->next_vsi = vsi->idx; + pf->next_vsi = vsi->idx; + ice_vsi_free_stats(vsi); ice_vsi_free_arrays(vsi); mutex_unlock(&pf->sw_mutex); devm_kfree(dev, vsi); +} - return 0; +void ice_vsi_delete(struct ice_vsi *vsi) +{ + ice_vsi_delete_from_hw(vsi); + ice_vsi_free(vsi); } /** @@ -461,6 +558,10 @@ static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi) if (!pf->vsi_stats) return -ENOENT; + if (pf->vsi_stats[vsi->idx]) + /* realloc will happen in rebuild path */ + return 0; + vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL); if (!vsi_stat) return -ENOMEM; @@ -491,128 +592,93 @@ err_alloc_tx: } /** - * ice_vsi_alloc - Allocates the next available struct VSI in the PF - * @pf: board private structure - * @vsi_type: type of VSI + * ice_vsi_alloc_def - set default values for already allocated VSI + * @vsi: ptr to VSI * @ch: ptr to channel - * @vf: VF for ICE_VSI_VF and ICE_VSI_CTRL - * - * The VF pointer is used for ICE_VSI_VF and ICE_VSI_CTRL. For ICE_VSI_CTRL, - * it may be NULL in the case there is no association with a VF. For - * ICE_VSI_VF the VF pointer *must not* be NULL. - * - * returns a pointer to a VSI on success, NULL on failure. */ -static struct ice_vsi * -ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, - struct ice_channel *ch, struct ice_vf *vf) +static int +ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch) { - struct device *dev = ice_pf_to_dev(pf); - struct ice_vsi *vsi = NULL; - - if (WARN_ON(vsi_type == ICE_VSI_VF && !vf)) - return NULL; - - /* Need to protect the allocation of the VSIs at the PF level */ - mutex_lock(&pf->sw_mutex); - - /* If we have already allocated our maximum number of VSIs, - * pf->next_vsi will be ICE_NO_VSI. 
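Worth noting: the per-ring statistics above are freed with kfree_rcu() and published with WRITE_ONCE() because lockless readers may still be walking these pointers under rcu_read_lock(). A minimal sketch of the reader side this pairs with; the helper name and the stats field layout are assumptions for illustration, not taken from this patch:

        static u64 example_read_tx_pkts(struct ice_vsi_stats *vsi_stat, u16 q)
        {
                struct ice_ring_stats *rs;
                u64 pkts = 0;

                rcu_read_lock();
                /* pairs with the WRITE_ONCE() publication above */
                rs = READ_ONCE(vsi_stat->tx_ring_stats[q]);
                if (rs)
                        pkts = rs->stats.pkts; /* field layout assumed */
                rcu_read_unlock();

                return pkts;
        }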
If not, pf->next_vsi index - * is available to be populated - */ - if (pf->next_vsi == ICE_NO_VSI) { - dev_dbg(dev, "out of VSI slots!\n"); - goto unlock_pf; + if (vsi->type != ICE_VSI_CHNL) { + ice_vsi_set_num_qs(vsi); + if (ice_vsi_alloc_arrays(vsi)) + return -ENOMEM; } - vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL); - if (!vsi) - goto unlock_pf; - - vsi->type = vsi_type; - vsi->back = pf; - set_bit(ICE_VSI_DOWN, vsi->state); - - if (vsi_type == ICE_VSI_VF) - ice_vsi_set_num_qs(vsi, vf); - else if (vsi_type != ICE_VSI_CHNL) - ice_vsi_set_num_qs(vsi, NULL); - switch (vsi->type) { case ICE_VSI_SWITCHDEV_CTRL: - if (ice_vsi_alloc_arrays(vsi)) - goto err_rings; - /* Setup eswitch MSIX irq handler for VSI */ vsi->irq_handler = ice_eswitch_msix_clean_rings; break; case ICE_VSI_PF: - if (ice_vsi_alloc_arrays(vsi)) - goto err_rings; - /* Setup default MSIX irq handler for VSI */ vsi->irq_handler = ice_msix_clean_rings; break; case ICE_VSI_CTRL: - if (ice_vsi_alloc_arrays(vsi)) - goto err_rings; - /* Setup ctrl VSI MSIX irq handler */ vsi->irq_handler = ice_msix_clean_ctrl_vsi; - - /* For the PF control VSI this is NULL, for the VF control VSI - * this will be the first VF to allocate it. - */ - vsi->vf = vf; - break; - case ICE_VSI_VF: - if (ice_vsi_alloc_arrays(vsi)) - goto err_rings; - vsi->vf = vf; break; case ICE_VSI_CHNL: if (!ch) - goto err_rings; + return -EINVAL; + vsi->num_rxq = ch->num_rxq; vsi->num_txq = ch->num_txq; vsi->next_base_q = ch->base_q; break; + case ICE_VSI_VF: case ICE_VSI_LB: - if (ice_vsi_alloc_arrays(vsi)) - goto err_rings; break; default: - dev_warn(dev, "Unknown VSI type %d\n", vsi->type); - goto unlock_pf; + ice_vsi_free_arrays(vsi); + return -EINVAL; } - if (vsi->type == ICE_VSI_CTRL && !vf) { - /* Use the last VSI slot as the index for PF control VSI */ - vsi->idx = pf->num_alloc_vsi - 1; - pf->ctrl_vsi_idx = vsi->idx; - pf->vsi[vsi->idx] = vsi; - } else { - /* fill slot and make note of the index */ - vsi->idx = pf->next_vsi; - pf->vsi[pf->next_vsi] = vsi; + return 0; +} + +/** + * ice_vsi_alloc - Allocates the next available struct VSI in the PF + * @pf: board private structure + * + * Reserves a VSI index from the PF and allocates an empty VSI structure + * without a type. The VSI structure must later be initialized by calling + * ice_vsi_cfg(). + * + * returns a pointer to a VSI on success, NULL on failure. + */ +static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_vsi *vsi = NULL; + + /* Need to protect the allocation of the VSIs at the PF level */ + mutex_lock(&pf->sw_mutex); - /* prepare pf->next_vsi for next use */ - pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, - pf->next_vsi); + /* If we have already allocated our maximum number of VSIs, + * pf->next_vsi will be ICE_NO_VSI. 
If not, pf->next_vsi index + * is available to be populated + */ + if (pf->next_vsi == ICE_NO_VSI) { + dev_dbg(dev, "out of VSI slots!\n"); + goto unlock_pf; } - if (vsi->type == ICE_VSI_CTRL && vf) - vf->ctrl_vsi_idx = vsi->idx; + vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL); + if (!vsi) + goto unlock_pf; - /* allocate memory for Tx/Rx ring stat pointers */ - if (ice_vsi_alloc_stat_arrays(vsi)) - goto err_rings; + vsi->back = pf; + set_bit(ICE_VSI_DOWN, vsi->state); - goto unlock_pf; + /* fill slot and make note of the index */ + vsi->idx = pf->next_vsi; + pf->vsi[pf->next_vsi] = vsi; + + /* prepare pf->next_vsi for next use */ + pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, + pf->next_vsi); -err_rings: - devm_kfree(dev, vsi); - vsi = NULL; unlock_pf: mutex_unlock(&pf->sw_mutex); return vsi; @@ -1177,12 +1243,15 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) /** * ice_vsi_init - Create and initialize a VSI * @vsi: the VSI being configured - * @init_vsi: is this call creating a VSI + * @vsi_flags: VSI configuration flags + * + * Set ICE_VSI_FLAG_INIT to initialize a new VSI context, clear it to + * reconfigure an existing context. * * This initializes a VSI context depending on the VSI type to be added and * passes it down to the add_vsi aq command to create a new VSI. */ -static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) +static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags) { struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; @@ -1244,7 +1313,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) /* if updating VSI context, make sure to set valid_sections * to indicate which section of VSI context is being updated */ - if (!init_vsi) + if (!(vsi_flags & ICE_VSI_FLAG_INIT)) ctxt->info.valid_sections |= cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); } @@ -1257,7 +1326,8 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) if (ret) goto out; - if (!init_vsi) /* means VSI being updated */ + if (!(vsi_flags & ICE_VSI_FLAG_INIT)) + /* means VSI being updated */ /* must indicate which section of VSI context is * being modified */ @@ -1272,7 +1342,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); } - if (init_vsi) { + if (vsi_flags & ICE_VSI_FLAG_INIT) { ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); if (ret) { dev_err(dev, "Add VSI failed, err %d\n", ret); @@ -1436,7 +1506,7 @@ static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi) * ice_vsi_setup_vector_base - Set up the base vector for the given VSI * @vsi: ptr to the VSI * - * This should only be called after ice_vsi_alloc() which allocates the + * This should only be called after ice_vsi_alloc_def() which allocates the * corresponding SW VSI structure and initializes num_queue_pairs for the * newly allocated VSI.
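With the boolean replaced by a flags word, intent reads directly at each call site. A condensed sketch of the two modes (both flow in through ice_vsi_cfg_def() and ice_vsi_rebuild() in later hunks):

        /* first-time creation: issues the add_vsi AQ command */
        ret = ice_vsi_init(vsi, ICE_VSI_FLAG_INIT);

        /* reconfiguration: updates the existing context instead, setting
         * valid_sections only for the fields being changed
         */
        ret = ice_vsi_init(vsi, ICE_VSI_FLAG_NO_INIT);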
* @@ -1584,106 +1654,6 @@ err_out: } /** - * ice_vsi_free_stats - Free the ring statistics structures - * @vsi: VSI pointer - */ -static void ice_vsi_free_stats(struct ice_vsi *vsi) -{ - struct ice_vsi_stats *vsi_stat; - struct ice_pf *pf = vsi->back; - int i; - - if (vsi->type == ICE_VSI_CHNL) - return; - if (!pf->vsi_stats) - return; - - vsi_stat = pf->vsi_stats[vsi->idx]; - if (!vsi_stat) - return; - - ice_for_each_alloc_txq(vsi, i) { - if (vsi_stat->tx_ring_stats[i]) { - kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); - WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); - } - } - - ice_for_each_alloc_rxq(vsi, i) { - if (vsi_stat->rx_ring_stats[i]) { - kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); - WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); - } - } - - kfree(vsi_stat->tx_ring_stats); - kfree(vsi_stat->rx_ring_stats); - kfree(vsi_stat); - pf->vsi_stats[vsi->idx] = NULL; -} - -/** - * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI - * @vsi: VSI which is having stats allocated - */ -static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi) -{ - struct ice_ring_stats **tx_ring_stats; - struct ice_ring_stats **rx_ring_stats; - struct ice_vsi_stats *vsi_stats; - struct ice_pf *pf = vsi->back; - u16 i; - - vsi_stats = pf->vsi_stats[vsi->idx]; - tx_ring_stats = vsi_stats->tx_ring_stats; - rx_ring_stats = vsi_stats->rx_ring_stats; - - /* Allocate Tx ring stats */ - ice_for_each_alloc_txq(vsi, i) { - struct ice_ring_stats *ring_stats; - struct ice_tx_ring *ring; - - ring = vsi->tx_rings[i]; - ring_stats = tx_ring_stats[i]; - - if (!ring_stats) { - ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); - if (!ring_stats) - goto err_out; - - WRITE_ONCE(tx_ring_stats[i], ring_stats); - } - - ring->ring_stats = ring_stats; - } - - /* Allocate Rx ring stats */ - ice_for_each_alloc_rxq(vsi, i) { - struct ice_ring_stats *ring_stats; - struct ice_rx_ring *ring; - - ring = vsi->rx_rings[i]; - ring_stats = rx_ring_stats[i]; - - if (!ring_stats) { - ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); - if (!ring_stats) - goto err_out; - - WRITE_ONCE(rx_ring_stats[i], ring_stats); - } - - ring->ring_stats = ring_stats; - } - - return 0; - -err_out: - ice_vsi_free_stats(vsi); - return -ENOMEM; -} - -/** * ice_vsi_manage_rss_lut - disable/enable RSS * @vsi: the VSI being changed * @ena: boolean value indicating if this is an enable or disable request @@ -1992,8 +1962,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi) void ice_vsi_cfg_frame_size(struct ice_vsi *vsi) { if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { - vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; - vsi->rx_buf_len = ICE_RXBUF_2048; + vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; + vsi->rx_buf_len = ICE_RXBUF_1664; #if (PAGE_SIZE < 8192) } else if (!ICE_2K_TOO_SMALL_WITH_PADDING && (vsi->netdev->mtu <= ETH_DATA_LEN)) { @@ -2002,11 +1972,7 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi) #endif } else { vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; -#if (PAGE_SIZE < 8192) vsi->rx_buf_len = ICE_RXBUF_3072; -#else - vsi->rx_buf_len = ICE_RXBUF_2048; -#endif } } @@ -2645,54 +2611,97 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) } /** - * ice_vsi_setup - Set up a VSI by a given type - * @pf: board private structure - * @pi: pointer to the port_info instance - * @vsi_type: VSI type - * @vf: pointer to VF to which this VSI connects. This field is used primarily - * for the ICE_VSI_VF type. Other VSI types should pass NULL. 
- * @ch: ptr to channel - * - * This allocates the sw VSI structure and its queue resources. + * ice_free_vf_ctrl_res - Free the VF control VSI resource + * @pf: pointer to PF structure + * @vsi: the VSI to free resources for * - * Returns pointer to the successfully allocated and configured VSI sw struct on - * success, NULL on failure. + * Check if the VF control VSI resource is still in use. If no VF is using it + * any more, release the VSI resource. Otherwise, leave it to be cleaned up + * once no other VF uses it. */ -struct ice_vsi * -ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, - enum ice_vsi_type vsi_type, struct ice_vf *vf, - struct ice_channel *ch) +static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi) +{ + struct ice_vf *vf; + unsigned int bkt; + + rcu_read_lock(); + ice_for_each_vf_rcu(pf, bkt, vf) { + if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { + rcu_read_unlock(); + return; + } + } + rcu_read_unlock(); + + /* No other VFs left that have control VSI. It is now safe to reclaim + * SW interrupts back to the common pool. + */ + ice_free_res(pf->irq_tracker, vsi->base_vector, + ICE_RES_VF_CTRL_VEC_ID); + pf->num_avail_sw_msix += vsi->num_q_vectors; +} + +static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct device *dev = ice_pf_to_dev(pf); - struct ice_vsi *vsi; int ret, i; - if (vsi_type == ICE_VSI_CHNL) - vsi = ice_vsi_alloc(pf, vsi_type, ch, NULL); - else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL) - vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf); - else - vsi = ice_vsi_alloc(pf, vsi_type, NULL, NULL); + /* configure VSI nodes based on number of queues and TC's */ + ice_for_each_traffic_class(i) { + if (!(vsi->tc_cfg.ena_tc & BIT(i))) + continue; - if (!vsi) { - dev_err(dev, "could not allocate VSI\n"); - return NULL; + if (vsi->type == ICE_VSI_CHNL) { + if (!vsi->alloc_txq && vsi->num_txq) + max_txqs[i] = vsi->num_txq; + else + max_txqs[i] = pf->num_lan_tx; + } else { + max_txqs[i] = vsi->alloc_txq; + } } - vsi->port_info = pi; + dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_txqs); + if (ret) { + dev_err(dev, "VSI %d failed lan queue config, error %d\n", + vsi->vsi_num, ret); + return ret; + } + + return 0; +} + +/** + * ice_vsi_cfg_def - configure default VSI based on the type + * @vsi: pointer to VSI + * @params: the parameters to configure this VSI with + */ +static int +ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) +{ + struct device *dev = ice_pf_to_dev(vsi->back); + struct ice_pf *pf = vsi->back; + int ret; + vsi->vsw = pf->first_sw; - if (vsi->type == ICE_VSI_PF) - vsi->ethtype = ETH_P_PAUSE; + + ret = ice_vsi_alloc_def(vsi, params->ch); + if (ret) + return ret; + + /* allocate memory for Tx/Rx ring stat pointers */ + if (ice_vsi_alloc_stat_arrays(vsi)) + goto unroll_vsi_alloc; ice_alloc_fd_res(vsi); - if (vsi_type != ICE_VSI_CHNL) { - if (ice_vsi_get_qs(vsi)) { - dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", - vsi->idx); - goto unroll_vsi_alloc; - } + if (ice_vsi_get_qs(vsi)) { + dev_err(dev, "Failed to allocate queues. 
vsi->idx = %d\n", + vsi->idx); + goto unroll_vsi_alloc_stat; } /* set RSS capabilities */ @@ -2702,7 +2711,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, ice_vsi_set_tc_cfg(vsi); /* create the VSI */ - ret = ice_vsi_init(vsi, true); + ret = ice_vsi_init(vsi, params->flags); if (ret) goto unroll_get_qs; @@ -2733,6 +2742,14 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, goto unroll_vector_base; ice_vsi_map_rings_to_vectors(vsi); + if (ice_is_xdp_ena_vsi(vsi)) { + ret = ice_vsi_determine_xdp_res(vsi); + if (ret) + goto unroll_vector_base; + ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); + if (ret) + goto unroll_vector_base; + } /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ if (vsi->type != ICE_VSI_CTRL) @@ -2797,30 +2814,156 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, goto unroll_vsi_init; } - /* configure VSI nodes based on number of queues and TC's */ - ice_for_each_traffic_class(i) { - if (!(vsi->tc_cfg.ena_tc & BIT(i))) - continue; + return 0; - if (vsi->type == ICE_VSI_CHNL) { - if (!vsi->alloc_txq && vsi->num_txq) - max_txqs[i] = vsi->num_txq; - else - max_txqs[i] = pf->num_lan_tx; +unroll_vector_base: + /* reclaim SW interrupts back to the common pool */ + ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); + pf->num_avail_sw_msix += vsi->num_q_vectors; +unroll_alloc_q_vector: + ice_vsi_free_q_vectors(vsi); +unroll_vsi_init: + ice_vsi_delete_from_hw(vsi); +unroll_get_qs: + ice_vsi_put_qs(vsi); +unroll_vsi_alloc_stat: + ice_vsi_free_stats(vsi); +unroll_vsi_alloc: + ice_vsi_free_arrays(vsi); + return ret; +} + +/** + * ice_vsi_cfg - configure a previously allocated VSI + * @vsi: pointer to VSI + * @params: parameters used to configure this VSI + */ +int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) +{ + struct ice_pf *pf = vsi->back; + int ret; + + if (WARN_ON(params->type == ICE_VSI_VF && !params->vf)) + return -EINVAL; + + vsi->type = params->type; + vsi->port_info = params->pi; + + /* For VSIs which don't have a connected VF, this will be NULL */ + vsi->vf = params->vf; + + ret = ice_vsi_cfg_def(vsi, params); + if (ret) + return ret; + + ret = ice_vsi_cfg_tc_lan(vsi->back, vsi); + if (ret) + ice_vsi_decfg(vsi); + + if (vsi->type == ICE_VSI_CTRL) { + if (vsi->vf) { + WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI); + vsi->vf->ctrl_vsi_idx = vsi->idx; } else { - max_txqs[i] = vsi->alloc_txq; + WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI); + pf->ctrl_vsi_idx = vsi->idx; } } - dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); - ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, - max_txqs); - if (ret) { - dev_err(dev, "VSI %d failed lan queue config, error %d\n", - vsi->vsi_num, ret); - goto unroll_clear_rings; + return ret; +} + +/** + * ice_vsi_decfg - remove all VSI configuration + * @vsi: pointer to VSI + */ +void ice_vsi_decfg(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int err; + + /* The Rx rule will only exist to remove if the LLDP FW + * engine is currently stopped + */ + if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && + !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) + ice_cfg_sw_lldp(vsi, false, false); + + ice_fltr_remove_all(vsi); + ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); + err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); + if (err) + dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", + vsi->vsi_num, err); + + if (ice_is_xdp_ena_vsi(vsi)) + /* return value check can be skipped here, it always 
returns + * 0 if reset is in progress + */ + ice_destroy_xdp_rings(vsi); + + ice_vsi_clear_rings(vsi); + ice_vsi_free_q_vectors(vsi); + ice_vsi_put_qs(vsi); + ice_vsi_free_arrays(vsi); + + /* SR-IOV determines needed MSIX resources all at once instead of per + * VSI since when VFs are spawned we know how many VFs there are and how + * many interrupts each VF needs. SR-IOV MSIX resources are also + * cleared in the same manner. + */ + if (vsi->type == ICE_VSI_CTRL && vsi->vf) { + ice_free_vf_ctrl_res(pf, vsi); + } else if (vsi->type != ICE_VSI_VF) { + /* reclaim SW interrupts back to the common pool */ + ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); + pf->num_avail_sw_msix += vsi->num_q_vectors; + vsi->base_vector = 0; + } + + if (vsi->type == ICE_VSI_VF && + vsi->agg_node && vsi->agg_node->valid) + vsi->agg_node->num_vsis--; + if (vsi->agg_node) { + vsi->agg_node->valid = false; + vsi->agg_node->agg_id = 0; + } +} + +/** + * ice_vsi_setup - Set up a VSI by a given type + * @pf: board private structure + * @params: parameters to use when creating the VSI + * + * This allocates the sw VSI structure and its queue resources. + * + * Returns pointer to the successfully allocated and configured VSI sw struct on + * success, NULL on failure. + */ +struct ice_vsi * +ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_vsi *vsi; + int ret; + + /* ice_vsi_setup can only initialize a new VSI, and we must have + * a port_info structure for it. + */ + if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) || + WARN_ON(!params->pi)) + return NULL; + + vsi = ice_vsi_alloc(pf); + if (!vsi) { + dev_err(dev, "could not allocate VSI\n"); + return NULL; } + ret = ice_vsi_cfg(vsi, params); + if (ret) + goto err_vsi_cfg; + /* Add switch rule to drop all Tx Flow Control Frames, of look up * type ETHERTYPE from VSIs, and restrict malicious VF from sending * out PAUSE or PFC frames. If enabled, FW can still send FC frames. @@ -2830,34 +2973,21 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, * be dropped so that VFs cannot send LLDP packets to reconfig DCB * settings in the HW. */ - if (!ice_is_safe_mode(pf)) - if (vsi->type == ICE_VSI_PF) { - ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, - ICE_DROP_PACKET); - ice_cfg_sw_lldp(vsi, true, true); - } + if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { + ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, + ICE_DROP_PACKET); + ice_cfg_sw_lldp(vsi, true, true); + } if (!vsi->agg_node) ice_set_agg_vsi(vsi); + return vsi; -unroll_clear_rings: - ice_vsi_clear_rings(vsi); -unroll_vector_base: - /* reclaim SW interrupts back to the common pool */ - ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); - pf->num_avail_sw_msix += vsi->num_q_vectors; -unroll_alloc_q_vector: - ice_vsi_free_q_vectors(vsi); -unroll_vsi_init: - ice_vsi_free_stats(vsi); - ice_vsi_delete(vsi); -unroll_get_qs: - ice_vsi_put_qs(vsi); -unroll_vsi_alloc: - if (vsi_type == ICE_VSI_VF) +err_vsi_cfg: + if (params->type == ICE_VSI_VF) ice_enable_lag(pf->lag); - ice_vsi_clear(vsi); + ice_vsi_free(vsi); return NULL; } @@ -3121,37 +3251,6 @@ void ice_napi_del(struct ice_vsi *vsi) } /** - * ice_free_vf_ctrl_res - Free the VF control VSI resource - * @pf: pointer to PF structure - * @vsi: the VSI to free resources for - * - * Check if the VF control VSI resource is still in use. If no VF is using it - * any more, release the VSI resource. Otherwise, leave it to be cleaned up - * once no other VF uses it. 
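Every creation path now funnels through a stack-allocated parameter block. A minimal caller sketch, mirroring the ice_*_vsi_setup() wrappers updated later in this patch (the -ENOMEM mapping is illustrative):

        struct ice_vsi_cfg_params params = {};
        struct ice_vsi *vsi;

        params.type = ICE_VSI_PF;
        params.pi = pf->hw.port_info;
        params.flags = ICE_VSI_FLAG_INIT; /* ice_vsi_setup() WARNs without it */

        vsi = ice_vsi_setup(pf, &params);
        if (!vsi)
                return -ENOMEM;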
- */ -static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi) -{ - struct ice_vf *vf; - unsigned int bkt; - - rcu_read_lock(); - ice_for_each_vf_rcu(pf, bkt, vf) { - if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { - rcu_read_unlock(); - return; - } - } - rcu_read_unlock(); - - /* No other VFs left that have control VSI. It is now safe to reclaim - * SW interrupts back to the common pool. - */ - ice_free_res(pf->irq_tracker, vsi->base_vector, - ICE_RES_VF_CTRL_VEC_ID); - pf->num_avail_sw_msix += vsi->num_q_vectors; -} - -/** * ice_vsi_release - Delete a VSI and free its resources * @vsi: the VSI being removed * @@ -3160,7 +3259,6 @@ static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi) int ice_vsi_release(struct ice_vsi *vsi) { struct ice_pf *pf; - int err; if (!vsi->back) return -ENODEV; @@ -3178,50 +3276,14 @@ int ice_vsi_release(struct ice_vsi *vsi) clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); } + if (vsi->type == ICE_VSI_PF) + ice_devlink_destroy_pf_port(pf); + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) ice_rss_clean(vsi); - /* Disable VSI and free resources */ - if (vsi->type != ICE_VSI_LB) - ice_vsi_dis_irq(vsi); ice_vsi_close(vsi); - - /* SR-IOV determines needed MSIX resources all at once instead of per - * VSI since when VFs are spawned we know how many VFs there are and how - * many interrupts each VF needs. SR-IOV MSIX resources are also - * cleared in the same manner. - */ - if (vsi->type == ICE_VSI_CTRL && vsi->vf) { - ice_free_vf_ctrl_res(pf, vsi); - } else if (vsi->type != ICE_VSI_VF) { - /* reclaim SW interrupts back to the common pool */ - ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); - pf->num_avail_sw_msix += vsi->num_q_vectors; - } - - if (!ice_is_safe_mode(pf)) { - if (vsi->type == ICE_VSI_PF) { - ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, - ICE_DROP_PACKET); - ice_cfg_sw_lldp(vsi, true, false); - /* The Rx rule will only exist to remove if the LLDP FW - * engine is currently stopped - */ - if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) - ice_cfg_sw_lldp(vsi, false, false); - } - } - - if (ice_is_vsi_dflt_vsi(vsi)) - ice_clear_dflt_vsi(vsi); - ice_fltr_remove_all(vsi); - ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); - err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); - if (err) - dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", - vsi->vsi_num, err); - ice_vsi_delete(vsi); - ice_vsi_free_q_vectors(vsi); + ice_vsi_decfg(vsi); if (vsi->netdev) { if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) { @@ -3235,22 +3297,12 @@ int ice_vsi_release(struct ice_vsi *vsi) } } - if (vsi->type == ICE_VSI_PF) - ice_devlink_destroy_pf_port(pf); - - if (vsi->type == ICE_VSI_VF && - vsi->agg_node && vsi->agg_node->valid) - vsi->agg_node->num_vsis--; - ice_vsi_clear_rings(vsi); - ice_vsi_free_stats(vsi); - ice_vsi_put_qs(vsi); - /* retain SW VSI data structure since it is needed to unregister and * free VSI netdev when PF is not in reset recovery pending state, * for example, during rmmod.
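With the interrupt reclaim, LLDP rules and filter teardown folded into ice_vsi_decfg(), the release path reduces to a short ordered sequence, condensed here from the result of this hunk:

        /* new ice_vsi_release() flow, condensed */
        ice_rss_clean(vsi);   /* only if ICE_FLAG_RSS_ENA is set */
        ice_vsi_close(vsi);   /* stop traffic, free queue resources */
        ice_vsi_decfg(vsi);   /* filters, scheduler config, vectors */
        ice_vsi_delete(vsi);  /* HW context + SW struct, skipped during reset */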
*/ if (!ice_is_reset_in_progress(pf->state)) - ice_vsi_clear(vsi); + ice_vsi_delete(vsi); return 0; } @@ -3375,7 +3427,7 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, * @prev_txq: Number of Tx rings before ring reallocation * @prev_rxq: Number of Rx rings before ring reallocation */ -static int +static void ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq) { struct ice_vsi_stats *vsi_stat; @@ -3383,9 +3435,9 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq) int i; if (!prev_txq || !prev_rxq) - return 0; + return; if (vsi->type == ICE_VSI_CHNL) - return 0; + return; vsi_stat = pf->vsi_stats[vsi->idx]; @@ -3406,36 +3458,36 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq) } } } - - return 0; } /** * ice_vsi_rebuild - Rebuild VSI after reset * @vsi: VSI to be rebuilt * - * @init_vsi: is this an initialization or a reconfigure of the VSI + * @vsi_flags: flags used for VSI rebuild flow + * + * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or + * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware. * * Returns 0 on success and a negative value on failure */ -int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) +int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) { - u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; + struct ice_vsi_cfg_params params = {}; struct ice_coalesce_stored *coalesce; - int ret, i, prev_txq, prev_rxq; + int ret, prev_txq, prev_rxq; int prev_num_q_vectors = 0; - enum ice_vsi_type vtype; struct ice_pf *pf; if (!vsi) return -EINVAL; + params = ice_vsi_to_params(vsi); + params.flags = vsi_flags; + pf = vsi->back; - vtype = vsi->type; - if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf)) + if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) return -EINVAL; - ice_vsi_init_vlan_ops(vsi); - coalesce = kcalloc(vsi->num_q_vectors, sizeof(struct ice_coalesce_stored), GFP_KERNEL); if (!coalesce) @@ -3446,188 +3498,32 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) prev_txq = vsi->num_txq; prev_rxq = vsi->num_rxq; - ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); - ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); + ice_vsi_decfg(vsi); + ret = ice_vsi_cfg_def(vsi, &params); if (ret) - dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", - vsi->vsi_num, ret); - ice_vsi_free_q_vectors(vsi); - - /* SR-IOV determines needed MSIX resources all at once instead of per - * VSI since when VFs are spawned we know how many VFs there are and how - * many interrupts each VF needs. SR-IOV MSIX resources are also - * cleared in the same manner.
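Callers select the rebuild depth with the same flag word; the queue-count change path later in this patch, for instance, rebuilds a live VSI without re-adding its hardware context (error message illustrative):

        err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
        if (err)
                dev_err(ice_pf_to_dev(pf), "VSI %d rebuild failed, err %d\n",
                        vsi->vsi_num, err);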
- */ - if (vtype != ICE_VSI_VF) { - /* reclaim SW interrupts back to the common pool */ - ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); - pf->num_avail_sw_msix += vsi->num_q_vectors; - vsi->base_vector = 0; - } - - if (ice_is_xdp_ena_vsi(vsi)) - /* return value check can be skipped here, it always returns - * 0 if reset is in progress - */ - ice_destroy_xdp_rings(vsi); - ice_vsi_put_qs(vsi); - ice_vsi_clear_rings(vsi); - ice_vsi_free_arrays(vsi); - if (vtype == ICE_VSI_VF) - ice_vsi_set_num_qs(vsi, vsi->vf); - else - ice_vsi_set_num_qs(vsi, NULL); - - ret = ice_vsi_alloc_arrays(vsi); - if (ret < 0) - goto err_vsi; - - ice_vsi_get_qs(vsi); - - ice_alloc_fd_res(vsi); - ice_vsi_set_tc_cfg(vsi); - - /* Initialize VSI struct elements and create VSI in FW */ - ret = ice_vsi_init(vsi, init_vsi); - if (ret < 0) - goto err_vsi; - - switch (vtype) { - case ICE_VSI_CTRL: - case ICE_VSI_SWITCHDEV_CTRL: - case ICE_VSI_PF: - ret = ice_vsi_alloc_q_vectors(vsi); - if (ret) - goto err_rings; - - ret = ice_vsi_setup_vector_base(vsi); - if (ret) - goto err_vectors; - - ret = ice_vsi_set_q_vectors_reg_idx(vsi); - if (ret) - goto err_vectors; - - ret = ice_vsi_alloc_rings(vsi); - if (ret) - goto err_vectors; - - ret = ice_vsi_alloc_ring_stats(vsi); - if (ret) - goto err_vectors; - - ice_vsi_map_rings_to_vectors(vsi); - - vsi->stat_offsets_loaded = false; - if (ice_is_xdp_ena_vsi(vsi)) { - ret = ice_vsi_determine_xdp_res(vsi); - if (ret) - goto err_vectors; - ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); - if (ret) - goto err_vectors; - } - /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ - if (vtype != ICE_VSI_CTRL) - /* Do not exit if configuring RSS had an issue, at - * least receive traffic on first queue. Hence no - * need to capture return value - */ - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) - ice_vsi_cfg_rss_lut_key(vsi); - - /* disable or enable CRC stripping */ - if (vsi->netdev) - ice_vsi_cfg_crc_strip(vsi, !!(vsi->netdev->features & - NETIF_F_RXFCS)); - - break; - case ICE_VSI_VF: - ret = ice_vsi_alloc_q_vectors(vsi); - if (ret) - goto err_rings; - - ret = ice_vsi_set_q_vectors_reg_idx(vsi); - if (ret) - goto err_vectors; - - ret = ice_vsi_alloc_rings(vsi); - if (ret) - goto err_vectors; - - ret = ice_vsi_alloc_ring_stats(vsi); - if (ret) - goto err_vectors; - - vsi->stat_offsets_loaded = false; - break; - case ICE_VSI_CHNL: - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { - ice_vsi_cfg_rss_lut_key(vsi); - ice_vsi_set_rss_flow_fld(vsi); - } - break; - default: - break; - } - - /* configure VSI nodes based on number of queues and TC's */ - for (i = 0; i < vsi->tc_cfg.numtc; i++) { - /* configure VSI nodes based on number of queues and TC's. - * ADQ creates VSIs for each TC/Channel but doesn't - * allocate queues instead it reconfigures the PF queues - * as per the TC command. So max_txqs should point to the - * PF Tx queues. 
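The per-TC queue-limit selection this removed block performed now lives in ice_vsi_cfg_tc_lan() near the top of this patch; condensed:

        /* condensed from ice_vsi_cfg_tc_lan(), inside the per-TC loop */
        if (vsi->type == ICE_VSI_CHNL)
                max_txqs[i] = (!vsi->alloc_txq && vsi->num_txq) ?
                              vsi->num_txq : pf->num_lan_tx;
        else
                max_txqs[i] = vsi->alloc_txq;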
- */ - if (vtype == ICE_VSI_CHNL) - max_txqs[i] = pf->num_lan_tx; - else - max_txqs[i] = vsi->alloc_txq; - - if (ice_is_xdp_ena_vsi(vsi)) - max_txqs[i] += vsi->num_xdp_txq; - } - - if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) - /* If MQPRIO is set, means channel code path, hence for main - * VSI's, use TC as 1 - */ - ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); - else - ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, - vsi->tc_cfg.ena_tc, max_txqs); + goto err_vsi_cfg; + ret = ice_vsi_cfg_tc_lan(pf, vsi); if (ret) { - dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n", - vsi->vsi_num, ret); - if (init_vsi) { + if (vsi_flags & ICE_VSI_FLAG_INIT) { ret = -EIO; - goto err_vectors; + goto err_vsi_cfg_tc_lan; } else { + kfree(coalesce); return ice_schedule_reset(pf, ICE_RESET_PFR); } } - if (ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq)) - goto err_vectors; + ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq); ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors); kfree(coalesce); return 0; -err_vectors: - ice_vsi_free_q_vectors(vsi); -err_rings: - if (vsi->netdev) { - vsi->current_netdev_flags = 0; - unregister_netdev(vsi->netdev); - free_netdev(vsi->netdev); - vsi->netdev = NULL; - } -err_vsi: - ice_vsi_clear(vsi); - set_bit(ICE_RESET_FAILED, pf->state); +err_vsi_cfg_tc_lan: + ice_vsi_decfg(vsi); +err_vsi_cfg: kfree(coalesce); return ret; } diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index dcdf69a693e9..75221478f2dc 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -7,6 +7,47 @@ #include "ice.h" #include "ice_vlan.h" +/* Flags used for VSI configuration and rebuild */ +#define ICE_VSI_FLAG_INIT BIT(0) +#define ICE_VSI_FLAG_NO_INIT 0 + +/** + * struct ice_vsi_cfg_params - VSI configuration parameters + * @pi: pointer to the port_info instance for the VSI + * @ch: pointer to the channel structure for the VSI, may be NULL + * @vf: pointer to the VF associated with this VSI, may be NULL + * @type: the type of VSI to configure + * @flags: VSI flags used for rebuild and configuration + * + * Parameter structure used when configuring a new VSI. + */ +struct ice_vsi_cfg_params { + struct ice_port_info *pi; + struct ice_channel *ch; + struct ice_vf *vf; + enum ice_vsi_type type; + u32 flags; +}; + +/** + * ice_vsi_to_params - Get parameters for an existing VSI + * @vsi: the VSI to get parameters for + * + * Fill a parameter structure for reconfiguring a VSI with its current + * parameters, such as during a rebuild operation. 
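The inline helper defined just below is the glue that lets ice_vsi_rebuild() round-trip a live VSI through the new configuration path; condensed from the rebuild hunk above:

        struct ice_vsi_cfg_params params = ice_vsi_to_params(vsi);

        params.flags = vsi_flags; /* ICE_VSI_FLAG_INIT or ICE_VSI_FLAG_NO_INIT */

        ice_vsi_decfg(vsi);
        ret = ice_vsi_cfg_def(vsi, &params);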
+ */ +static inline struct ice_vsi_cfg_params ice_vsi_to_params(struct ice_vsi *vsi) +{ + struct ice_vsi_cfg_params params = {}; + + params.pi = vsi->port_info; + params.ch = vsi->ch; + params.vf = vsi->vf; + params.type = vsi->type; + + return params; +} + const char *ice_vsi_type_str(enum ice_vsi_type vsi_type); bool ice_pf_state_is_nominal(struct ice_pf *pf); @@ -42,7 +83,6 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); int ice_set_link(struct ice_vsi *vsi, bool ena); void ice_vsi_delete(struct ice_vsi *vsi); -int ice_vsi_clear(struct ice_vsi *vsi); int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc); @@ -51,9 +91,7 @@ int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi); void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); struct ice_vsi * -ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, - enum ice_vsi_type vsi_type, struct ice_vf *vf, - struct ice_channel *ch); +ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params); void ice_napi_del(struct ice_vsi *vsi); @@ -63,6 +101,7 @@ void ice_vsi_close(struct ice_vsi *vsi); int ice_ena_vsi(struct ice_vsi *vsi, bool locked); +void ice_vsi_decfg(struct ice_vsi *vsi); void ice_dis_vsi(struct ice_vsi *vsi, bool locked); int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id); @@ -70,7 +109,8 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id); int ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id); -int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi); +int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags); +int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params); bool ice_is_reset_in_progress(unsigned long *state); int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index a9a7f8b52140..567694bf098b 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -22,6 +22,7 @@ #include "ice_eswitch.h" #include "ice_tc_lib.h" #include "ice_vsi_vlan_ops.h" +#include <net/xdp_sock_drv.h> #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" static const char ice_driver_string[] = DRV_SUMMARY; @@ -44,7 +45,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)"); #endif /* !CONFIG_DYNAMIC_DEBUG */ -static DEFINE_IDA(ice_aux_ida); DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key); EXPORT_SYMBOL(ice_xdp_locking_key); @@ -275,6 +275,8 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m) if (status && status != -EEXIST) return status; + netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n", + vsi->vsi_num, promisc_m); return 0; } @@ -300,6 +302,8 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m) promisc_m, 0); } + netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n", + vsi->vsi_num, promisc_m); return status; } @@ -414,6 +418,16 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } err = 0; vlan_ops->dis_rx_filtering(vsi); + + /* promiscuous mode implies allmulticast so + * that VSIs that are in promiscuous mode are + * subscribed to multicast packets coming to + * the port + */ + err = ice_set_promisc(vsi, + ICE_MCAST_PROMISC_BITS); + if (err) + goto out_promisc; } } else { /* Clear Rx filter to remove traffic from wire */ @@ -430,6 +444,18 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) 
NETIF_F_HW_VLAN_CTAG_FILTER) vlan_ops->ena_rx_filtering(vsi); } + + /* disable allmulti here, but only if allmulti is not + * still enabled for the netdev + */ + if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) { + err = ice_clear_promisc(vsi, + ICE_MCAST_PROMISC_BITS); + if (err) { + netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n", + err, vsi->vsi_num); + } + } } } goto exit; @@ -538,7 +564,7 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) /* Disable VFs until reset is completed */ mutex_lock(&pf->vfs.table_lock); ice_for_each_vf(pf, bkt, vf) - ice_set_vf_state_qs_dis(vf); + ice_set_vf_state_dis(vf); mutex_unlock(&pf->vfs.table_lock); if (ice_is_eswitch_mode_switchdev(pf)) { @@ -2570,8 +2596,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) xdp_ring->netdev = NULL; xdp_ring->dev = dev; xdp_ring->count = vsi->num_tx_desc; - xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1; - xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1; WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); if (ice_setup_tx_ring(xdp_ring)) goto free_xdp_rings; @@ -2863,6 +2887,18 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi) } /** + * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP + * @vsi: Pointer to VSI structure + */ +static int ice_max_xdp_frame_size(struct ice_vsi *vsi) +{ + if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) + return ICE_RXBUF_1664; + else + return ICE_RXBUF_3072; +} + +/** * ice_xdp_setup_prog - Add or remove XDP eBPF program * @vsi: VSI to setup XDP for * @prog: XDP program @@ -2872,13 +2908,16 @@ static int ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, struct netlink_ext_ack *extack) { - int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; + unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; bool if_running = netif_running(vsi->netdev); int ret = 0, xdp_ring_err = 0; - if (frame_size > vsi->rx_buf_len) { - NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP"); - return -EOPNOTSUPP; + if (prog && !prog->aux->xdp_has_frags) { + if (frame_size > ice_max_xdp_frame_size(vsi)) { + NL_SET_ERR_MSG_MOD(extack, + "MTU is too large for linear frames and XDP prog does not support frags"); + return -EOPNOTSUPP; + } } /* need to stop netdev while setting up the program for Rx rings */ @@ -2899,11 +2938,13 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); } + xdp_features_set_redirect_target(vsi->netdev, true); /* reallocate Rx queues that are used for zero-copy */ xdp_ring_err = ice_realloc_zc_buf(vsi, true); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed"); } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { + xdp_features_clear_redirect_target(vsi->netdev); xdp_ring_err = ice_destroy_xdp_rings(vsi); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); @@ -3318,10 +3359,11 @@ static void ice_napi_add(struct ice_vsi *vsi) /** * ice_set_ops - set netdev and ethtools ops for the given netdev - * @netdev: netdev instance + * @vsi: the VSI associated with the new netdev */ -static void ice_set_ops(struct net_device *netdev) +static void ice_set_ops(struct ice_vsi *vsi) { + struct net_device *netdev = vsi->netdev; struct ice_pf *pf = ice_netdev_to_pf(netdev); if (ice_is_safe_mode(pf)) { @@ -3333,6 +3375,13 @@ static void ice_set_ops(struct net_device *netdev) netdev->netdev_ops = &ice_netdev_ops; netdev->udp_tunnel_nic_info = 
&pf->hw.udp_tunnel_nic; ice_set_ethtool_ops(netdev); + + if (vsi->type != ICE_VSI_PF) + return; + + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY | + NETDEV_XDP_ACT_RX_SG; } /** @@ -3421,53 +3470,8 @@ static void ice_set_netdev_features(struct net_device *netdev) * be changed at runtime */ netdev->hw_features |= NETIF_F_RXFCS; -} - -/** - * ice_cfg_netdev - Allocate, configure and register a netdev - * @vsi: the VSI associated with the new netdev - * - * Returns 0 on success, negative value on failure - */ -static int ice_cfg_netdev(struct ice_vsi *vsi) -{ - struct ice_netdev_priv *np; - struct net_device *netdev; - u8 mac_addr[ETH_ALEN]; - - netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, - vsi->alloc_rxq); - if (!netdev) - return -ENOMEM; - set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); - vsi->netdev = netdev; - np = netdev_priv(netdev); - np->vsi = vsi; - - ice_set_netdev_features(netdev); - - ice_set_ops(netdev); - - if (vsi->type == ICE_VSI_PF) { - SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); - ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); - eth_hw_addr_set(netdev, mac_addr); - ether_addr_copy(netdev->perm_addr, mac_addr); - } - - netdev->priv_flags |= IFF_UNICAST_FLT; - - /* Setup netdev TC information */ - ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); - - /* setup watchdog timeout value to be 5 second */ - netdev->watchdog_timeo = 5 * HZ; - - netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = ICE_MAX_MTU; - - return 0; + netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE); } /** @@ -3495,14 +3499,27 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) static struct ice_vsi * ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL); + struct ice_vsi_cfg_params params = {}; + + params.type = ICE_VSI_PF; + params.pi = pi; + params.flags = ICE_VSI_FLAG_INIT; + + return ice_vsi_setup(pf, &params); } static struct ice_vsi * ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, struct ice_channel *ch) { - return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch); + struct ice_vsi_cfg_params params = {}; + + params.type = ICE_VSI_CHNL; + params.pi = pi; + params.ch = ch; + params.flags = ICE_VSI_FLAG_INIT; + + return ice_vsi_setup(pf, &params); } /** @@ -3516,7 +3533,13 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, static struct ice_vsi * ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL); + struct ice_vsi_cfg_params params = {}; + + params.type = ICE_VSI_CTRL; + params.pi = pi; + params.flags = ICE_VSI_FLAG_INIT; + + return ice_vsi_setup(pf, &params); } /** @@ -3530,7 +3553,13 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) struct ice_vsi * ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) { - return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL); + struct ice_vsi_cfg_params params = {}; + + params.type = ICE_VSI_LB; + params.pi = pi; + params.flags = ICE_VSI_FLAG_INIT; + + return ice_vsi_setup(pf, &params); } /** @@ -3690,20 +3719,6 @@ static void ice_tc_indir_block_unregister(struct ice_vsi *vsi) } /** - * ice_tc_indir_block_remove - clean indirect TC block notifications - * @pf: PF structure - */ -static void ice_tc_indir_block_remove(struct ice_pf *pf) -{ - struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); - - if (!pf_vsi) - return; - - ice_tc_indir_block_unregister(pf_vsi); -} - -/** - * ice_tc_indir_block_register - Register
TC indirect block notifications * @vsi: VSI struct which has the netdev * @@ -3723,78 +3738,6 @@ static int ice_tc_indir_block_register(struct ice_vsi *vsi) } /** - * ice_setup_pf_sw - Setup the HW switch on startup or after reset - * @pf: board private structure - * - * Returns 0 on success, negative value on failure - */ -static int ice_setup_pf_sw(struct ice_pf *pf) -{ - struct device *dev = ice_pf_to_dev(pf); - bool dvm = ice_is_dvm_ena(&pf->hw); - struct ice_vsi *vsi; - int status; - - if (ice_is_reset_in_progress(pf->state)) - return -EBUSY; - - status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); - if (status) - return -EIO; - - vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); - if (!vsi) - return -ENOMEM; - - /* init channel list */ - INIT_LIST_HEAD(&vsi->ch_list); - - status = ice_cfg_netdev(vsi); - if (status) - goto unroll_vsi_setup; - /* netdev has to be configured before setting frame size */ - ice_vsi_cfg_frame_size(vsi); - - /* init indirect block notifications */ - status = ice_tc_indir_block_register(vsi); - if (status) { - dev_err(dev, "Failed to register netdev notifier\n"); - goto unroll_cfg_netdev; - } - - /* Setup DCB netlink interface */ - ice_dcbnl_setup(vsi); - - /* registering the NAPI handler requires both the queues and - * netdev to be created, which are done in ice_pf_vsi_setup() - * and ice_cfg_netdev() respectively - */ - ice_napi_add(vsi); - - status = ice_init_mac_fltr(pf); - if (status) - goto unroll_napi_add; - - return 0; - -unroll_napi_add: - ice_tc_indir_block_unregister(vsi); -unroll_cfg_netdev: - if (vsi) { - ice_napi_del(vsi); - if (vsi->netdev) { - clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); - free_netdev(vsi->netdev); - vsi->netdev = NULL; - } - } - -unroll_vsi_setup: - ice_vsi_release(vsi); - return status; -} - -/** * ice_get_avail_q_count - Get count of queues in use * @pf_qmap: bitmap to get queue use count from * @lock: pointer to a mutex that protects access to pf_qmap @@ -4195,12 +4138,13 @@ bool ice_is_wol_supported(struct ice_hw *hw) * @vsi: VSI being changed * @new_rx: new number of Rx queues * @new_tx: new number of Tx queues + * @locked: is adev device_lock held * * Only change the number of queues if new_tx, or new_rx is non-0. * * Returns 0 on success. 
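The new @locked argument only tells ice_pf_dcb_recfg() whether the caller already holds the auxiliary device lock; a hypothetical call from a context that does not hold it:

        err = ice_vsi_recfg_qs(vsi, new_rx, new_tx, false /* adev lock not held */);
        if (err)
                return err;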
*/ -int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) +int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) { struct ice_pf *pf = vsi->back; int err = 0, timeout = 50; @@ -4222,14 +4166,14 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) /* set for the next time the netdev is started */ if (!netif_running(vsi->netdev)) { - ice_vsi_rebuild(vsi, false); + ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); goto done; } ice_vsi_close(vsi); - ice_vsi_rebuild(vsi, false); - ice_pf_dcb_recfg(pf); + ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + ice_pf_dcb_recfg(pf, locked); ice_vsi_open(vsi); done: clear_bit(ICE_CFG_BUSY, pf->state); @@ -4491,6 +4435,23 @@ err_vsi_open: return err; } +static void ice_deinit_fdir(struct ice_pf *pf) +{ + struct ice_vsi *vsi = ice_get_ctrl_vsi(pf); + + if (!vsi) + return; + + ice_vsi_manage_fdir(vsi, false); + ice_vsi_release(vsi); + if (pf->ctrl_vsi_idx != ICE_NO_VSI) { + pf->vsi[pf->ctrl_vsi_idx] = NULL; + pf->ctrl_vsi_idx = ICE_NO_VSI; + } + + mutex_destroy(&(&pf->hw)->fdir_fltr_lock); +} + /** * ice_get_opt_fw_name - return optional firmware file name or NULL * @pf: pointer to the PF instance @@ -4590,123 +4551,172 @@ static void ice_print_wake_reason(struct ice_pf *pf) } /** - * ice_register_netdev - register netdev and devlink port - * @pf: pointer to the PF struct + * ice_register_netdev - register netdev + * @vsi: pointer to the VSI struct */ -static int ice_register_netdev(struct ice_pf *pf) +static int ice_register_netdev(struct ice_vsi *vsi) { - struct ice_vsi *vsi; - int err = 0; + int err; - vsi = ice_get_main_vsi(pf); if (!vsi || !vsi->netdev) return -EIO; - err = ice_devlink_create_pf_port(pf); - if (err) - goto err_devlink_create; - - SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); err = register_netdev(vsi->netdev); if (err) - goto err_register_netdev; + return err; set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); netif_carrier_off(vsi->netdev); netif_tx_stop_all_queues(vsi->netdev); return 0; -err_register_netdev: - ice_devlink_destroy_pf_port(pf); -err_devlink_create: - free_netdev(vsi->netdev); - vsi->netdev = NULL; - clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); - return err; +} + +static void ice_unregister_netdev(struct ice_vsi *vsi) +{ + if (!vsi || !vsi->netdev) + return; + + unregister_netdev(vsi->netdev); + clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); } /** - * ice_probe - Device initialization routine - * @pdev: PCI device information struct - * @ent: entry in ice_pci_tbl + * ice_cfg_netdev - Allocate, configure and register a netdev + * @vsi: the VSI associated with the new netdev * - * Returns 0 on success, negative on failure + * Returns 0 on success, negative value on failure */ -static int -ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) +static int ice_cfg_netdev(struct ice_vsi *vsi) { - struct device *dev = &pdev->dev; - struct ice_pf *pf; - struct ice_hw *hw; - int i, err; + struct ice_netdev_priv *np; + struct net_device *netdev; + u8 mac_addr[ETH_ALEN]; - if (pdev->is_virtfn) { - dev_err(dev, "can't probe a virtual function\n"); - return -EINVAL; + netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, + vsi->alloc_rxq); + if (!netdev) + return -ENOMEM; + + set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); + vsi->netdev = netdev; + np = netdev_priv(netdev); + np->vsi = vsi; + + ice_set_netdev_features(netdev); + ice_set_ops(vsi); + + if 
(vsi->type == ICE_VSI_PF) { + SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); + ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); + eth_hw_addr_set(netdev, mac_addr); } - /* this driver uses devres, see - * Documentation/driver-api/driver-model/devres.rst - */ - err = pcim_enable_device(pdev); + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* Setup netdev TC information */ + ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); + + netdev->max_mtu = ICE_MAX_MTU; + + return 0; +} + +static void ice_decfg_netdev(struct ice_vsi *vsi) +{ + clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); + free_netdev(vsi->netdev); + vsi->netdev = NULL; +} + +static int ice_start_eth(struct ice_vsi *vsi) +{ + int err; + + err = ice_init_mac_fltr(vsi->back); if (err) return err; - err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); - if (err) { - dev_err(dev, "BAR0 I/O map error %d\n", err); - return err; - } + rtnl_lock(); + err = ice_vsi_open(vsi); + rtnl_unlock(); - pf = ice_allocate_pf(dev); - if (!pf) - return -ENOMEM; + return err; +} - /* initialize Auxiliary index to invalid value */ - pf->aux_idx = -1; +static int ice_init_eth(struct ice_pf *pf) +{ + struct ice_vsi *vsi = ice_get_main_vsi(pf); + int err; - /* set up for high or low DMA */ - err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); - if (err) { - dev_err(dev, "DMA configuration failed: 0x%x\n", err); + if (!vsi) + return -EINVAL; + + /* init channel list */ + INIT_LIST_HEAD(&vsi->ch_list); + + err = ice_cfg_netdev(vsi); + if (err) return err; - } + /* Setup DCB netlink interface */ + ice_dcbnl_setup(vsi); - pci_enable_pcie_error_reporting(pdev); - pci_set_master(pdev); + err = ice_init_mac_fltr(pf); + if (err) + goto err_init_mac_fltr; - pf->pdev = pdev; - pci_set_drvdata(pdev, pf); - set_bit(ICE_DOWN, pf->state); - /* Disable service task until DOWN bit is cleared */ - set_bit(ICE_SERVICE_DIS, pf->state); + err = ice_devlink_create_pf_port(pf); + if (err) + goto err_devlink_create_pf_port; - hw = &pf->hw; - hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; - pci_save_state(pdev); + SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); - hw->back = pf; - hw->vendor_id = pdev->vendor; - hw->device_id = pdev->device; - pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); - hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_device_id = pdev->subsystem_device; - hw->bus.device = PCI_SLOT(pdev->devfn); - hw->bus.func = PCI_FUNC(pdev->devfn); - ice_set_ctrlq_len(hw); + err = ice_register_netdev(vsi); + if (err) + goto err_register_netdev; - pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); + err = ice_tc_indir_block_register(vsi); + if (err) + goto err_tc_indir_block_register; -#ifndef CONFIG_DYNAMIC_DEBUG - if (debug < -1) - hw->debug_mask = debug; -#endif + ice_napi_add(vsi); + + return 0; + +err_tc_indir_block_register: + ice_unregister_netdev(vsi); +err_register_netdev: + ice_devlink_destroy_pf_port(pf); +err_devlink_create_pf_port: +err_init_mac_fltr: + ice_decfg_netdev(vsi); + return err; +} + +static void ice_deinit_eth(struct ice_pf *pf) +{ + struct ice_vsi *vsi = ice_get_main_vsi(pf); + + if (!vsi) + return; + + ice_vsi_close(vsi); + ice_unregister_netdev(vsi); + ice_devlink_destroy_pf_port(pf); + ice_tc_indir_block_unregister(vsi); + ice_decfg_netdev(vsi); +} + +static int ice_init_dev(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + int err; err = ice_init_hw(hw); if (err) { dev_err(dev, "ice_init_hw failed: %d\n", err); - err = -EIO; 
- goto err_exit_unroll; + return err; } ice_init_feature_support(pf); @@ -4729,62 +4739,31 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) err = ice_init_pf(pf); if (err) { dev_err(dev, "ice_init_pf failed: %d\n", err); - goto err_init_pf_unroll; + goto err_init_pf; } - ice_devlink_init_regions(pf); - pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; - i = 0; if (pf->hw.tnl.valid_count[TNL_VXLAN]) { - pf->hw.udp_tunnel_nic.tables[i].n_entries = + pf->hw.udp_tunnel_nic.tables[0].n_entries = pf->hw.tnl.valid_count[TNL_VXLAN]; - pf->hw.udp_tunnel_nic.tables[i].tunnel_types = + pf->hw.udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN; - i++; } if (pf->hw.tnl.valid_count[TNL_GENEVE]) { - pf->hw.udp_tunnel_nic.tables[i].n_entries = + pf->hw.udp_tunnel_nic.tables[1].n_entries = pf->hw.tnl.valid_count[TNL_GENEVE]; - pf->hw.udp_tunnel_nic.tables[i].tunnel_types = + pf->hw.udp_tunnel_nic.tables[1].tunnel_types = UDP_TUNNEL_TYPE_GENEVE; - i++; - } - - pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; - if (!pf->num_alloc_vsi) { - err = -EIO; - goto err_init_pf_unroll; - } - if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { - dev_warn(&pf->pdev->dev, - "limiting the VSI count due to UDP tunnel limitation %d > %d\n", - pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); - pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; - } - - pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), - GFP_KERNEL); - if (!pf->vsi) { - err = -ENOMEM; - goto err_init_pf_unroll; - } - - pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi, - sizeof(*pf->vsi_stats), GFP_KERNEL); - if (!pf->vsi_stats) { - err = -ENOMEM; - goto err_init_vsi_unroll; } err = ice_init_interrupt_scheme(pf); if (err) { dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); err = -EIO; - goto err_init_vsi_stats_unroll; + goto err_init_interrupt_scheme; } /* In case of MSIX we are going to setup the misc vector right here @@ -4795,49 +4774,94 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) err = ice_req_irq_msix_misc(pf); if (err) { dev_err(dev, "setup of misc vector failed: %d\n", err); - goto err_init_interrupt_unroll; + goto err_req_irq_msix_misc; } - /* create switch struct for the switch element created by FW on boot */ - pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); - if (!pf->first_sw) { - err = -ENOMEM; - goto err_msix_misc_unroll; - } + return 0; - if (hw->evb_veb) - pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; - else - pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; +err_req_irq_msix_misc: + ice_clear_interrupt_scheme(pf); +err_init_interrupt_scheme: + ice_deinit_pf(pf); +err_init_pf: + ice_deinit_hw(hw); + return err; +} - pf->first_sw->pf = pf; +static void ice_deinit_dev(struct ice_pf *pf) +{ + ice_free_irq_msix_misc(pf); + ice_clear_interrupt_scheme(pf); + ice_deinit_pf(pf); + ice_deinit_hw(&pf->hw); +} - /* record the sw_id available for later use */ - pf->first_sw->sw_id = hw->port_info->sw_id; +static void ice_init_features(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); - err = ice_setup_pf_sw(pf); - if (err) { - dev_err(dev, "probe failed due to setup PF switch: %d\n", err); - goto err_alloc_sw_unroll; - } + if (ice_is_safe_mode(pf)) + return; - clear_bit(ICE_SERVICE_DIS, pf->state); + /* 
initialize DDP driven features */ + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) + ice_ptp_init(pf); - /* tell the firmware we are up */ - err = ice_send_version(pf); - if (err) { - dev_err(dev, "probe failed sending driver version %s. error: %d\n", - UTS_RELEASE, err); - goto err_send_version_unroll; + if (ice_is_feature_supported(pf, ICE_F_GNSS)) + ice_gnss_init(pf); + + /* Note: Flow director init failure is non-fatal to load */ + if (ice_init_fdir(pf)) + dev_err(dev, "could not initialize flow director\n"); + + /* Note: DCB init failure is non-fatal to load */ + if (ice_init_pf_dcb(pf, false)) { + clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(ICE_FLAG_DCB_ENA, pf->flags); + } else { + ice_cfg_lldp_mib_change(&pf->hw, true); } - /* since everything is good, start the service timer */ - mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); + if (ice_init_lag(pf)) + dev_warn(dev, "Failed to init link aggregation support\n"); +} + +static void ice_deinit_features(struct ice_pf *pf) +{ + ice_deinit_lag(pf); + if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) + ice_cfg_lldp_mib_change(&pf->hw, false); + ice_deinit_fdir(pf); + if (ice_is_feature_supported(pf, ICE_F_GNSS)) + ice_gnss_exit(pf); + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) + ice_ptp_release(pf); +} + +static void ice_init_wakeup(struct ice_pf *pf) +{ + /* Save wakeup reason register for later use */ + pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS); + + /* check for a power management event */ + ice_print_wake_reason(pf); + + /* clear wake status, all bits */ + wr32(&pf->hw, PFPM_WUS, U32_MAX); + + /* Disable WoL at init, wait for user to enable */ + device_set_wakeup_enable(ice_pf_to_dev(pf), false); +} + +static int ice_init_link(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + int err; err = ice_init_link_events(pf->hw.port_info); if (err) { dev_err(dev, "ice_init_link_events failed: %d\n", err); - goto err_send_version_unroll; + return err; } /* not a fatal error if this fails */ @@ -4873,109 +4897,350 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) set_bit(ICE_FLAG_NO_MEDIA, pf->flags); } - ice_verify_cacheline_size(pf); + return err; +} - /* Save wakeup reason register for later use */ - pf->wakeup_reason = rd32(hw, PFPM_WUS); +static int ice_init_pf_sw(struct ice_pf *pf) +{ + bool dvm = ice_is_dvm_ena(&pf->hw); + struct ice_vsi *vsi; + int err; - /* check for a power management event */ - ice_print_wake_reason(pf); + /* create switch struct for the switch element created by FW on boot */ + pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL); + if (!pf->first_sw) + return -ENOMEM; - /* clear wake status, all bits */ - wr32(hw, PFPM_WUS, U32_MAX); + if (pf->hw.evb_veb) + pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; + else + pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; - /* Disable WoL at init, wait for user to enable */ - device_set_wakeup_enable(dev, false); + pf->first_sw->pf = pf; - if (ice_is_safe_mode(pf)) { - ice_set_safe_mode_vlan_cfg(pf); - goto probe_done; + /* record the sw_id available for later use */ + pf->first_sw->sw_id = pf->hw.port_info->sw_id; + + err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); + if (err) + goto err_aq_set_port_params; + + vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); + if (!vsi) { + err = -ENOMEM; + goto err_pf_vsi_setup; } - /* initialize DDP driven features */ - if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) - ice_ptp_init(pf); + return 0; - if (ice_is_feature_supported(pf, 
ICE_F_GNSS)) - ice_gnss_init(pf); +err_pf_vsi_setup: +err_aq_set_port_params: + kfree(pf->first_sw); + return err; +} - /* Note: Flow director init failure is non-fatal to load */ - if (ice_init_fdir(pf)) - dev_err(dev, "could not initialize flow director\n"); +static void ice_deinit_pf_sw(struct ice_pf *pf) +{ + struct ice_vsi *vsi = ice_get_main_vsi(pf); - /* Note: DCB init failure is non-fatal to load */ - if (ice_init_pf_dcb(pf, false)) { - clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); - clear_bit(ICE_FLAG_DCB_ENA, pf->flags); - } else { - ice_cfg_lldp_mib_change(&pf->hw, true); + if (!vsi) + return; + + ice_vsi_release(vsi); + kfree(pf->first_sw); +} + +static int ice_alloc_vsis(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + + pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi; + if (!pf->num_alloc_vsi) + return -EIO; + + if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { + dev_warn(dev, + "limiting the VSI count due to UDP tunnel limitation %d > %d\n", + pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); + pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; } - if (ice_init_lag(pf)) - dev_warn(dev, "Failed to init link aggregation support\n"); + pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), + GFP_KERNEL); + if (!pf->vsi) + return -ENOMEM; - /* print PCI link speed and width */ - pcie_print_link_status(pf->pdev); + pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi, + sizeof(*pf->vsi_stats), GFP_KERNEL); + if (!pf->vsi_stats) { + devm_kfree(dev, pf->vsi); + return -ENOMEM; + } -probe_done: - err = ice_register_netdev(pf); - if (err) - goto err_netdev_reg; + return 0; +} + +static void ice_dealloc_vsis(struct ice_pf *pf) +{ + devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats); + pf->vsi_stats = NULL; + + pf->num_alloc_vsi = 0; + devm_kfree(ice_pf_to_dev(pf), pf->vsi); + pf->vsi = NULL; +} + +static int ice_init_devlink(struct ice_pf *pf) +{ + int err; err = ice_devlink_register_params(pf); if (err) - goto err_netdev_reg; + return err; + + ice_devlink_init_regions(pf); + ice_devlink_register(pf); + + return 0; +} + +static void ice_deinit_devlink(struct ice_pf *pf) +{ + ice_devlink_unregister(pf); + ice_devlink_destroy_regions(pf); + ice_devlink_unregister_params(pf); +} + +static int ice_init(struct ice_pf *pf) +{ + int err; + + err = ice_init_dev(pf); + if (err) + return err; + + err = ice_alloc_vsis(pf); + if (err) + goto err_alloc_vsis; + + err = ice_init_pf_sw(pf); + if (err) + goto err_init_pf_sw; + + ice_init_wakeup(pf); + + err = ice_init_link(pf); + if (err) + goto err_init_link; + + err = ice_send_version(pf); + if (err) + goto err_init_link; + + ice_verify_cacheline_size(pf); + + if (ice_is_safe_mode(pf)) + ice_set_safe_mode_vlan_cfg(pf); + else + /* print PCI link speed and width */ + pcie_print_link_status(pf->pdev); /* ready to go, so clear down state bit */ clear_bit(ICE_DOWN, pf->state); - if (ice_is_rdma_ena(pf)) { - pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL); - if (pf->aux_idx < 0) { - dev_err(dev, "Failed to allocate device ID for AUX driver\n"); - err = -ENOMEM; - goto err_devlink_reg_param; - } + clear_bit(ICE_SERVICE_DIS, pf->state); - err = ice_init_rdma(pf); - if (err) { - dev_err(dev, "Failed to initialize RDMA: %d\n", err); - err = -EIO; - goto err_init_aux_unroll; - } - } else { - dev_warn(dev, "RDMA is not supported on this device\n"); - } + /* since everything is good, start the service timer */ + mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); - ice_devlink_register(pf); return 
0; -err_init_aux_unroll: - pf->adev = NULL; - ida_free(&ice_aux_ida, pf->aux_idx); -err_devlink_reg_param: - ice_devlink_unregister_params(pf); -err_netdev_reg: -err_send_version_unroll: - ice_vsi_release_all(pf); -err_alloc_sw_unroll: +err_init_link: + ice_deinit_pf_sw(pf); +err_init_pf_sw: + ice_dealloc_vsis(pf); +err_alloc_vsis: + ice_deinit_dev(pf); + return err; +} + +static void ice_deinit(struct ice_pf *pf) +{ set_bit(ICE_SERVICE_DIS, pf->state); set_bit(ICE_DOWN, pf->state); - devm_kfree(dev, pf->first_sw); -err_msix_misc_unroll: - ice_free_irq_msix_misc(pf); -err_init_interrupt_unroll: - ice_clear_interrupt_scheme(pf); -err_init_vsi_stats_unroll: - devm_kfree(dev, pf->vsi_stats); - pf->vsi_stats = NULL; -err_init_vsi_unroll: - devm_kfree(dev, pf->vsi); -err_init_pf_unroll: - ice_deinit_pf(pf); - ice_devlink_destroy_regions(pf); - ice_deinit_hw(hw); -err_exit_unroll: - pci_disable_pcie_error_reporting(pdev); + + ice_deinit_pf_sw(pf); + ice_dealloc_vsis(pf); + ice_deinit_dev(pf); +} + +/** + * ice_load - load PF by initializing HW and starting VSI + * @pf: pointer to the PF instance + */ +int ice_load(struct ice_pf *pf) +{ + struct ice_vsi_cfg_params params = {}; + struct ice_vsi *vsi; + int err; + + err = ice_reset(&pf->hw, ICE_RESET_PFR); + if (err) + return err; + + err = ice_init_dev(pf); + if (err) + return err; + + vsi = ice_get_main_vsi(pf); + + params = ice_vsi_to_params(vsi); + params.flags = ICE_VSI_FLAG_INIT; + + err = ice_vsi_cfg(vsi, &params); + if (err) + goto err_vsi_cfg; + + err = ice_start_eth(ice_get_main_vsi(pf)); + if (err) + goto err_start_eth; + + err = ice_init_rdma(pf); + if (err) + goto err_init_rdma; + + ice_init_features(pf); + ice_service_task_restart(pf); + + clear_bit(ICE_DOWN, pf->state); + + return 0; + +err_init_rdma: + ice_vsi_close(ice_get_main_vsi(pf)); +err_start_eth: + ice_vsi_decfg(ice_get_main_vsi(pf)); +err_vsi_cfg: + ice_deinit_dev(pf); + return err; +} + +/** + * ice_unload - unload PF by stopping VSI and deinitializing HW + * @pf: pointer to the PF instance + */ +void ice_unload(struct ice_pf *pf) +{ + ice_deinit_features(pf); + ice_deinit_rdma(pf); + ice_vsi_close(ice_get_main_vsi(pf)); + ice_vsi_decfg(ice_get_main_vsi(pf)); + ice_deinit_dev(pf); +} + +/** + * ice_probe - Device initialization routine + * @pdev: PCI device information struct + * @ent: entry in ice_pci_tbl + * + * Returns 0 on success, negative on failure + */ +static int +ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) +{ + struct device *dev = &pdev->dev; + struct ice_pf *pf; + struct ice_hw *hw; + int err; + + if (pdev->is_virtfn) { + dev_err(dev, "can't probe a virtual function\n"); + return -EINVAL; + } + + /* this driver uses devres, see + * Documentation/driver-api/driver-model/devres.rst + */ + err = pcim_enable_device(pdev); + if (err) + return err; + + err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); + if (err) { + dev_err(dev, "BAR0 I/O map error %d\n", err); + return err; + } + + pf = ice_allocate_pf(dev); + if (!pf) + return -ENOMEM; + + /* initialize Auxiliary index to invalid value */ + pf->aux_idx = -1; + + /* set up for high or low DMA */ + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(dev, "DMA configuration failed: 0x%x\n", err); + return err; + } + + pci_set_master(pdev); + + pf->pdev = pdev; + pci_set_drvdata(pdev, pf); + set_bit(ICE_DOWN, pf->state); + /* Disable service task until DOWN bit is cleared */ + set_bit(ICE_SERVICE_DIS, pf->state); + + hw = &pf->hw; + hw->hw_addr =
pcim_iomap_table(pdev)[ICE_BAR0]; + pci_save_state(pdev); + + hw->back = pf; + hw->port_info = NULL; + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + hw->bus.device = PCI_SLOT(pdev->devfn); + hw->bus.func = PCI_FUNC(pdev->devfn); + ice_set_ctrlq_len(hw); + + pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); + +#ifndef CONFIG_DYNAMIC_DEBUG + if (debug < -1) + hw->debug_mask = debug; +#endif + + err = ice_init(pf); + if (err) + goto err_init; + + err = ice_init_eth(pf); + if (err) + goto err_init_eth; + + err = ice_init_rdma(pf); + if (err) + goto err_init_rdma; + + err = ice_init_devlink(pf); + if (err) + goto err_init_devlink; + + ice_init_features(pf); + + return 0; + +err_init_devlink: + ice_deinit_rdma(pf); +err_init_rdma: + ice_deinit_eth(pf); +err_init_eth: + ice_deinit(pf); +err_init: pci_disable_device(pdev); return err; } @@ -5050,51 +5315,33 @@ static void ice_remove(struct pci_dev *pdev) struct ice_pf *pf = pci_get_drvdata(pdev); int i; - ice_devlink_unregister(pf); for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { if (!ice_is_reset_in_progress(pf->state)) break; msleep(100); } - ice_tc_indir_block_remove(pf); - if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { set_bit(ICE_VF_RESETS_DISABLED, pf->state); ice_free_vfs(pf); } ice_service_task_stop(pf); - ice_aq_cancel_waiting_tasks(pf); - ice_unplug_aux_dev(pf); - if (pf->aux_idx >= 0) - ida_free(&ice_aux_ida, pf->aux_idx); - ice_devlink_unregister_params(pf); set_bit(ICE_DOWN, pf->state); - ice_deinit_lag(pf); - if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) - ice_ptp_release(pf); - if (ice_is_feature_supported(pf, ICE_F_GNSS)) - ice_gnss_exit(pf); if (!ice_is_safe_mode(pf)) ice_remove_arfs(pf); - ice_setup_mc_magic_wake(pf); + ice_deinit_features(pf); + ice_deinit_devlink(pf); + ice_deinit_rdma(pf); + ice_deinit_eth(pf); + ice_deinit(pf); + ice_vsi_release_all(pf); - mutex_destroy(&(&pf->hw)->fdir_fltr_lock); + + ice_setup_mc_magic_wake(pf); ice_set_wake(pf); - ice_free_irq_msix_misc(pf); - ice_for_each_vsi(pf, i) { - if (!pf->vsi[i]) - continue; - ice_vsi_free_q_vectors(pf->vsi[i]); - } - devm_kfree(&pdev->dev, pf->vsi_stats); - pf->vsi_stats = NULL; - ice_deinit_pf(pf); - ice_devlink_destroy_regions(pf); - ice_deinit_hw(&pf->hw); /* Issue a PFR as part of the prescribed driver unload flow. 
Do not * do it via ice_schedule_reset() since there is no need to rebuild @@ -5102,8 +5349,6 @@ static void ice_remove(struct pci_dev *pdev) */ ice_reset(&pf->hw, ICE_RESET_PFR); pci_wait_for_pending_transaction(pdev); - ice_clear_interrupt_scheme(pf); - pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); } @@ -5531,7 +5776,7 @@ static int __init ice_module_init(void) pr_info("%s\n", ice_driver_string); pr_info("%s\n", ice_copyright); - ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); + ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME); if (!ice_wq) { pr_err("Failed to create workqueue\n"); return -ENOMEM; @@ -6137,24 +6382,21 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi) } /** - * ice_vsi_cfg - Setup the VSI + * ice_vsi_cfg_lan - Setup the VSI lan related config * @vsi: the VSI being configured * * Return 0 on success and negative value on error */ -int ice_vsi_cfg(struct ice_vsi *vsi) +int ice_vsi_cfg_lan(struct ice_vsi *vsi) { int err; - if (vsi->netdev) { + if (vsi->netdev && vsi->type == ICE_VSI_PF) { ice_set_rx_mode(vsi->netdev); - if (vsi->type != ICE_VSI_LB) { - err = ice_vsi_vlan_setup(vsi); - - if (err) - return err; - } + err = ice_vsi_vlan_setup(vsi); + if (err) + return err; } ice_vsi_cfg_dcb_rings(vsi); @@ -6335,7 +6577,7 @@ static int ice_up_complete(struct ice_vsi *vsi) if (vsi->port_info && (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && - vsi->netdev) { + vsi->netdev && vsi->type == ICE_VSI_PF) { ice_print_link_msg(vsi, true); netif_tx_start_all_queues(vsi->netdev); netif_carrier_on(vsi->netdev); @@ -6346,7 +6588,9 @@ static int ice_up_complete(struct ice_vsi *vsi) * set the baseline so counters are ready when interface is up */ ice_update_eth_stats(vsi); - ice_service_task_schedule(pf); + + if (vsi->type == ICE_VSI_PF) + ice_service_task_schedule(pf); return 0; } @@ -6359,7 +6603,7 @@ int ice_up(struct ice_vsi *vsi) { int err; - err = ice_vsi_cfg(vsi); + err = ice_vsi_cfg_lan(vsi); if (!err) err = ice_up_complete(vsi); @@ -6927,7 +7171,7 @@ int ice_vsi_open_ctrl(struct ice_vsi *vsi) if (err) goto err_setup_rx; - err = ice_vsi_cfg(vsi); + err = ice_vsi_cfg_lan(vsi); if (err) goto err_setup_rx; @@ -6981,7 +7225,7 @@ int ice_vsi_open(struct ice_vsi *vsi) if (err) goto err_setup_rx; - err = ice_vsi_cfg(vsi); + err = ice_vsi_cfg_lan(vsi); if (err) goto err_setup_rx; @@ -7066,7 +7310,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) continue; /* rebuild the VSI */ - err = ice_vsi_rebuild(vsi, true); + err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT); if (err) { dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", err, vsi->idx, ice_vsi_type_str(type)); @@ -7322,18 +7566,6 @@ clear_recovery: } /** - * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP - * @vsi: Pointer to VSI structure - */ -static int ice_max_xdp_frame_size(struct ice_vsi *vsi) -{ - if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) - return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM; - else - return ICE_RXBUF_3072; -} - -/** * ice_change_mtu - NDO callback to change the MTU * @netdev: network interface device structure * @new_mtu: new value for maximum frame size @@ -7345,6 +7577,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; + struct bpf_prog *prog; u8 count = 0; int err = 0; @@ -7353,7 +7586,8 @@ static int ice_change_mtu(struct 
net_device *netdev, int new_mtu) return 0; } - if (ice_is_xdp_ena_vsi(vsi)) { + prog = vsi->xdp_prog; + if (prog && !prog->aux->xdp_has_frags) { int frame_size = ice_max_xdp_frame_size(vsi); if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { @@ -7361,6 +7595,12 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) frame_size - ICE_ETH_PKT_HDR_PAD); return -EINVAL; } + } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) { + if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) { + netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n", + ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD); + return -EINVAL; + } } /* if a reset is in progress, wait for some time for it to complete */ @@ -8411,12 +8651,9 @@ static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr) /* clear the VSI from scheduler tree */ ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx); - /* Delete VSI from FW */ + /* Delete VSI from FW, PF and HW VSI arrays */ ice_vsi_delete(ch->ch_vsi); - /* Delete VSI from PF and HW VSI arrays */ - ice_vsi_clear(ch->ch_vsi); - /* free the channel */ kfree(ch); } @@ -8475,7 +8712,7 @@ static int ice_rebuild_channels(struct ice_pf *pf) type = vsi->type; /* rebuild ADQ VSI */ - err = ice_vsi_rebuild(vsi, true); + err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT); if (err) { dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n", ice_vsi_type_str(type), vsi->idx, err); @@ -8707,14 +8944,14 @@ config_tcf: cur_rxq = vsi->num_rxq; /* proceed with rebuild main VSI using correct number of queues */ - ret = ice_vsi_rebuild(vsi, false); + ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); if (ret) { /* fallback to current number of queues */ dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n"); vsi->req_txq = cur_txq; vsi->req_rxq = cur_rxq; clear_bit(ICE_RESET_FAILED, pf->state); - if (ice_vsi_rebuild(vsi, false)) { + if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) { dev_err(dev, "Rebuild of main VSI failed again\n"); return ret; } diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index c262dc886e6a..f6f52a248066 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -662,7 +662,6 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, /* Verify that the simple checksum is zero */ for (i = 0; i < sizeof(*tmp); i++) - /* cppcheck-suppress objectIndex */ sum += ((u8 *)tmp)[i]; if (sum) { diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index d63161d73eb1..ac6f06f9a2ed 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -680,6 +680,7 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) struct ice_pf *pf; struct ice_hw *hw; u64 tstamp_ready; + bool link_up; int err; u8 idx; @@ -695,11 +696,14 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) if (err) return false; + /* Drop packets if the link went down */ + link_up = ptp_port->link_up; + for_each_set_bit(idx, tx->in_use, tx->len) { struct skb_shared_hwtstamps shhwtstamps = {}; u8 phy_idx = idx + tx->offset; u64 raw_tstamp = 0, tstamp; - bool drop_ts = false; + bool drop_ts = !link_up; struct sk_buff *skb; /* Drop packets which have waited for more than 2 seconds */ @@ -728,7 +732,7 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx); err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp); - 
if (err) + if (err && !drop_ts) continue; ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx); @@ -1770,6 +1774,38 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, } /** + * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC + * @info: the driver's PTP info structure + * @rq: The requested feature to change + * @on: Enable/disable flag + */ +static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info, + struct ptp_clock_request *rq, int on) +{ + struct ice_pf *pf = ptp_info_to_pf(info); + struct ice_perout_channel clk_cfg = {0}; + int err; + + switch (rq->type) { + case PTP_CLK_REQ_PPS: + clk_cfg.gpio_pin = PPS_PIN_INDEX; + clk_cfg.period = NSEC_PER_SEC; + clk_cfg.ena = !!on; + + err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true); + break; + case PTP_CLK_REQ_EXTTS: + err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index, + TIME_SYNC_PIN_INDEX, rq->extts.flags); + break; + default: + return -EOPNOTSUPP; + } + + return err; +} + +/** * ice_ptp_gettimex64 - Get the time of the clock * @info: the driver's PTP info structure * @ts: timespec64 structure to hold the current time value @@ -2221,6 +2257,19 @@ ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info) } /** + * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs + * @pf: pointer to the PF instance + * @info: PTP clock capabilities + */ +static void +ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info) +{ + info->pps = 1; + info->n_per_out = 0; + info->n_ext_ts = 1; +} + +/** * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support * @pf: Board private structure * @info: PTP info to fill @@ -2258,6 +2307,23 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info) } /** + * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support + * @pf: Board private structure + * @info: PTP info to fill + * + * Assign functions to the PTP capabilities structure for E823 devices. + * Functions which operate across all device families should be set directly + * in ice_ptp_set_caps. Only add functions here which are distinct for E823 + * devices.
+ */ +static void +ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info) +{ + info->enable = ice_ptp_gpio_enable_e823; + ice_ptp_setup_pins_e823(pf, info); +} + +/** * ice_ptp_set_caps - Set PTP capabilities * @pf: Board private structure */ @@ -2269,7 +2335,7 @@ static void ice_ptp_set_caps(struct ice_pf *pf) snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk", dev_driver_string(dev), dev_name(dev)); info->owner = THIS_MODULE; - info->max_adj = 999999999; + info->max_adj = 100000000; info->adjtime = ice_ptp_adjtime; info->adjfine = ice_ptp_adjfine; info->gettimex64 = ice_ptp_gettimex64; @@ -2277,6 +2343,8 @@ static void ice_ptp_set_caps(struct ice_pf *pf) if (ice_is_e810(&pf->hw)) ice_ptp_set_funcs_e810(pf, info); + else if (ice_is_e823(&pf->hw)) + ice_ptp_set_funcs_e823(pf, info); else ice_ptp_set_funcs_e822(pf, info); } diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 6d08b397df2a..4eca8d195ef0 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -1063,7 +1063,6 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, *num_nodes_added = 0; while (*num_nodes_added < num_nodes) { u16 max_child_nodes, num_added = 0; - /* cppcheck-suppress unusedVariable */ u32 temp; status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent, @@ -1655,12 +1654,13 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, u32 first_node_teid; u16 num_added = 0; u8 i, qgl, vsil; - int status; qgl = ice_sched_get_qgrp_layer(hw); vsil = ice_sched_get_vsi_layer(hw); parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); for (i = vsil + 1; i <= qgl; i++) { + int status; + if (!parent) return -EIO; @@ -1756,13 +1756,14 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, u32 first_node_teid; u16 num_added = 0; u8 i, vsil; - int status; if (!pi) return -EINVAL; vsil = ice_sched_get_vsi_layer(pi->hw); for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { + int status; + status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 3ba1408c56a9..96a64c25e2ef 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -41,21 +41,6 @@ static void ice_free_vf_entries(struct ice_pf *pf) } /** - * ice_vf_vsi_release - invalidate the VF's VSI after freeing it - * @vf: invalidate this VF's VSI after freeing it - */ -static void ice_vf_vsi_release(struct ice_vf *vf) -{ - struct ice_vsi *vsi = ice_get_vf_vsi(vf); - - if (WARN_ON(!vsi)) - return; - - ice_vsi_release(vsi); - ice_vf_invalidate_vsi(vf); -} - -/** * ice_free_vf_res - Free a VF's resources * @vf: pointer to the VF info */ @@ -248,11 +233,16 @@ void ice_free_vfs(struct ice_pf *pf) */ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) { - struct ice_port_info *pi = ice_vf_get_port_info(vf); + struct ice_vsi_cfg_params params = {}; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; - vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL); + params.type = ICE_VSI_VF; + params.pi = ice_vf_get_port_info(vf); + params.vf = vf; + params.flags = ICE_VSI_FLAG_INIT; + + vsi = ice_vsi_setup(pf, &params); if (!vsi) { dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n"); @@ -583,51 +573,19 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) */ static int ice_init_vf_vsi_res(struct ice_vf *vf) { -
struct ice_vsi_vlan_ops *vlan_ops; struct ice_pf *pf = vf->pf; - u8 broadcast[ETH_ALEN]; struct ice_vsi *vsi; - struct device *dev; int err; vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); - dev = ice_pf_to_dev(pf); vsi = ice_vf_vsi_setup(vf); if (!vsi) return -ENOMEM; - err = ice_vsi_add_vlan_zero(vsi); - if (err) { - dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n", - vf->vf_id); - goto release_vsi; - } - - vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); - err = vlan_ops->ena_rx_filtering(vsi); - if (err) { - dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n", - vf->vf_id); - goto release_vsi; - } - - eth_broadcast_addr(broadcast); - err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); - if (err) { - dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n", - vf->vf_id, err); - goto release_vsi; - } - - err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk); - if (err) { - dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n", - vf->vf_id); + err = ice_vf_init_host_cfg(vf, vsi); + if (err) goto release_vsi; - } - - vf->num_mac = 1; return 0; @@ -697,6 +655,21 @@ static void ice_sriov_free_vf(struct ice_vf *vf) } /** + * ice_sriov_clear_reset_state - clears VF Reset status register + * @vf: the vf to configure + */ +static void ice_sriov_clear_reset_state(struct ice_vf *vf) +{ + struct ice_hw *hw = &vf->pf->hw; + + /* Clear the reset status register so that VF immediately sees that + * the device is resetting, even if hardware hasn't yet gotten around + * to clearing VFGEN_RSTAT for us. + */ + wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS); +} + +/** * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers * @vf: the vf to configure */ @@ -799,23 +772,19 @@ static void ice_sriov_clear_reset_trigger(struct ice_vf *vf) } /** - * ice_sriov_vsi_rebuild - release and rebuild VF's VSI - * @vf: VF to release and setup the VSI for + * ice_sriov_create_vsi - Create a new VSI for a VF + * @vf: VF to create the VSI for * - * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF - * configuration change, etc.). + * This is called by ice_vf_recreate_vsi to create the new VSI after the old + * VSI has been released. 
*/ -static int ice_sriov_vsi_rebuild(struct ice_vf *vf) +static int ice_sriov_create_vsi(struct ice_vf *vf) { - struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; - ice_vf_vsi_release(vf); - if (!ice_vf_vsi_setup(vf)) { - dev_err(ice_pf_to_dev(pf), - "Failed to release and setup the VF%u's VSI\n", - vf->vf_id); + vsi = ice_vf_vsi_setup(vf); + if (!vsi) return -ENOMEM; - } return 0; } @@ -826,8 +795,6 @@ static int ice_sriov_vsi_rebuild(struct ice_vf *vf) */ static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf) { - ice_vf_rebuild_host_cfg(vf); - ice_vf_set_initialized(vf); ice_ena_vf_mappings(vf); wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); } @@ -835,11 +802,13 @@ static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf) static const struct ice_vf_ops ice_sriov_vf_ops = { .reset_type = ICE_VF_RESET, .free = ice_sriov_free_vf, + .clear_reset_state = ice_sriov_clear_reset_state, .clear_mbx_register = ice_sriov_clear_mbx_register, .trigger_reset_register = ice_sriov_trigger_reset_register, .poll_reset_status = ice_sriov_poll_reset_status, .clear_reset_trigger = ice_sriov_clear_reset_trigger, - .vsi_rebuild = ice_sriov_vsi_rebuild, + .irq_close = NULL, + .create_vsi = ice_sriov_create_vsi, .post_vsi_rebuild = ice_sriov_post_vsi_rebuild, }; @@ -879,21 +848,9 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) /* set sriov vf ops for VFs created during SRIOV flow */ vf->vf_ops = &ice_sriov_vf_ops; - vf->vf_sw_id = pf->first_sw; - /* assign default capabilities */ - vf->spoofchk = true; - vf->num_vf_qs = pf->vfs.num_qps_per; - ice_vc_set_default_allowlist(vf); - - /* ctrl_vsi_idx will be set to a valid value only when VF - * creates its first fdir rule. - */ - ice_vf_ctrl_invalidate_vsi(vf); - ice_vf_fdir_init(vf); - - ice_virtchnl_set_dflt_ops(vf); + ice_initialize_vf_entry(vf); - mutex_init(&vf->cfg_lock); + vf->vf_sw_id = pf->first_sw; hash_add_rcu(vfs->table, &vf->entry, vf_id); } @@ -1285,7 +1242,7 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) goto out_put_vf; ivi->vf = vf_id; - ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr); + ether_addr_copy(ivi->mac, vf->hw_lan_addr); /* VF configuration for VLAN and applicable QoS */ ivi->vlan = ice_vf_get_port_vlan_id(vf); @@ -1333,8 +1290,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) return -EINVAL; /* nothing left to do, unicast MAC already set */ - if (ether_addr_equal(vf->dev_lan_addr.addr, mac) && - ether_addr_equal(vf->hw_lan_addr.addr, mac)) { + if (ether_addr_equal(vf->dev_lan_addr, mac) && + ether_addr_equal(vf->hw_lan_addr, mac)) { ret = 0; goto out_put_vf; } @@ -1348,8 +1305,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) /* VF is notified of its new MAC via the PF's response to the * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset */ - ether_addr_copy(vf->dev_lan_addr.addr, mac); - ether_addr_copy(vf->hw_lan_addr.addr, mac); + ether_addr_copy(vf->dev_lan_addr, mac); + ether_addr_copy(vf->hw_lan_addr, mac); if (is_zero_ether_addr(mac)) { /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */ vf->pf_set_mac = false; @@ -1750,7 +1707,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf) dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n", vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id, - vf->dev_lan_addr.addr, + vf->dev_lan_addr, test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) ? 
"on" : "off"); } @@ -1794,7 +1751,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n", vf->mdd_tx_events.count, hw->pf_id, vf->vf_id, - vf->dev_lan_addr.addr); + vf->dev_lan_addr); } } mutex_unlock(&pf->vfs.table_lock); @@ -1884,7 +1841,7 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, if (pf_vsi) dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n", - &vf->dev_lan_addr.addr[0], + &vf->dev_lan_addr[0], pf_vsi->netdev->dev_addr); } } diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 9b762f7972ce..61f844d22512 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -5420,7 +5420,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, */ status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw)); if (status) - goto err_free_lkup_exts; + goto err_unroll; /* Group match words into recipes using preferred recipe grouping * criteria. diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index faba0f857cd9..6b48cbc049c6 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -792,7 +792,7 @@ static struct ice_vsi * ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr) { struct ice_rx_ring *ring = NULL; - struct ice_vsi *ch_vsi = NULL; + struct ice_vsi *dest_vsi = NULL; struct ice_pf *pf = vsi->back; struct device *dev; u32 tc_class; @@ -810,7 +810,7 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr) return ERR_PTR(-EOPNOTSUPP); } /* Locate ADQ VSI depending on hw_tc number */ - ch_vsi = vsi->tc_map_vsi[tc_class]; + dest_vsi = vsi->tc_map_vsi[tc_class]; break; case ICE_FWD_TO_Q: /* Locate the Rx queue */ @@ -824,7 +824,7 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr) /* Determine destination VSI even though the action is * FWD_TO_QUEUE, because QUEUE is associated with VSI */ - ch_vsi = tc_fltr->dest_vsi; + dest_vsi = tc_fltr->dest_vsi; break; default: dev_err(dev, @@ -832,13 +832,13 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr) tc_fltr->action.fltr_act); return ERR_PTR(-EINVAL); } - /* Must have valid ch_vsi (it could be main VSI or ADQ VSI) */ - if (!ch_vsi) { + /* Must have valid dest_vsi (it could be main VSI or ADQ VSI) */ + if (!dest_vsi) { dev_err(dev, "Unable to add filter because specified destination VSI doesn't exist\n"); return ERR_PTR(-EINVAL); } - return ch_vsi; + return dest_vsi; } /** @@ -860,7 +860,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; u32 flags = tc_fltr->flags; - struct ice_vsi *ch_vsi; + struct ice_vsi *dest_vsi; struct device *dev; u16 lkups_cnt = 0; u16 l4_proto = 0; @@ -883,9 +883,11 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, } /* validate forwarding action VSI and queue */ - ch_vsi = ice_tc_forward_action(vsi, tc_fltr); - if (IS_ERR(ch_vsi)) - return PTR_ERR(ch_vsi); + if (ice_is_forward_action(tc_fltr->action.fltr_act)) { + dest_vsi = ice_tc_forward_action(vsi, tc_fltr); + if (IS_ERR(dest_vsi)) + return PTR_ERR(dest_vsi); + } lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr); 
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); @@ -904,7 +906,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, switch (tc_fltr->action.fltr_act) { case ICE_FWD_TO_VSI: - rule_info.sw_act.vsi_handle = ch_vsi->idx; + rule_info.sw_act.vsi_handle = dest_vsi->idx; rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI; rule_info.sw_act.src = hw->pf_id; rule_info.rx = true; @@ -915,7 +917,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, case ICE_FWD_TO_Q: /* HW queue number in global space */ rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue; - rule_info.sw_act.vsi_handle = ch_vsi->idx; + rule_info.sw_act.vsi_handle = dest_vsi->idx; rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE; rule_info.sw_act.src = hw->pf_id; rule_info.rx = true; @@ -923,14 +925,15 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, tc_fltr->action.fwd.q.queue, tc_fltr->action.fwd.q.hw_queue, lkups_cnt); break; - default: - rule_info.sw_act.flag |= ICE_FLTR_TX; - /* In case of Tx (LOOKUP_TX), src needs to be src VSI */ - rule_info.sw_act.src = vsi->idx; - /* 'Rx' is false, direction of rule(LOOKUPTRX) */ - rule_info.rx = false; + case ICE_DROP_PACKET: + rule_info.sw_act.flag |= ICE_FLTR_RX; + rule_info.sw_act.src = hw->pf_id; + rule_info.rx = true; rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI; break; + default: + ret = -EOPNOTSUPP; + goto exit; } ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); @@ -953,11 +956,11 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, tc_fltr->dest_vsi_handle = rule_added.vsi_handle; if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI || tc_fltr->action.fltr_act == ICE_FWD_TO_Q) { - tc_fltr->dest_vsi = ch_vsi; + tc_fltr->dest_vsi = dest_vsi; /* keep track of advanced switch filter for * destination VSI */ - ch_vsi->num_chnl_fltr++; + dest_vsi->num_chnl_fltr++; /* keeps track of channel filters for PF VSI */ if (vsi->type == ICE_VSI_PF && @@ -978,6 +981,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, tc_fltr->action.fwd.q.hw_queue, rule_added.rid, rule_added.rule_id); break; + case ICE_DROP_PACKET: + dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is drop, rid %u, rule_id %u\n", + lkups_cnt, flags, rule_added.rid, rule_added.rule_id); + break; default: break; } @@ -1681,7 +1688,7 @@ ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr, struct ice_vsi *ch_vsi = NULL; u16 queue = act->rx_queue; - if (queue > vsi->num_rxq) { + if (queue >= vsi->num_rxq) { NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified queue is invalid"); return -EINVAL; @@ -1712,6 +1719,9 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr, case FLOW_ACTION_RX_QUEUE_MAPPING: /* forward to queue */ return ice_tc_forward_to_queue(vsi, fltr, act); + case FLOW_ACTION_DROP: + fltr->action.fltr_act = ICE_DROP_PACKET; + return 0; default: NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported TC action"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h index d916d1e92aa3..8d5e22ac7023 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h @@ -211,4 +211,14 @@ ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower); void ice_replay_tc_fltrs(struct ice_pf *pf); bool ice_is_tunnel_supported(struct net_device *dev); +static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act) +{ + switch (fltr_act) { + case ICE_FWD_TO_VSI: + case ICE_FWD_TO_Q: + return true; + 
default: + return false; + } +} #endif /* _ICE_TC_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 086f0b3ab68d..dfd22862e926 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -85,7 +85,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY | ICE_TX_DESC_CMD_RE; - tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT; + tx_buf->type = ICE_TX_BUF_DUMMY; tx_buf->raw_buf = raw_packet; tx_desc->cmd_type_offset_bsz = @@ -112,27 +112,29 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, static void ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf) { - if (tx_buf->skb) { - if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) - devm_kfree(ring->dev, tx_buf->raw_buf); - else if (ice_ring_is_xdp(ring)) - page_frag_free(tx_buf->raw_buf); - else - dev_kfree_skb_any(tx_buf->skb); - if (dma_unmap_len(tx_buf, len)) - dma_unmap_single(ring->dev, - dma_unmap_addr(tx_buf, dma), - dma_unmap_len(tx_buf, len), - DMA_TO_DEVICE); - } else if (dma_unmap_len(tx_buf, len)) { + if (dma_unmap_len(tx_buf, len)) dma_unmap_page(ring->dev, dma_unmap_addr(tx_buf, dma), dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + + switch (tx_buf->type) { + case ICE_TX_BUF_DUMMY: + devm_kfree(ring->dev, tx_buf->raw_buf); + break; + case ICE_TX_BUF_SKB: + dev_kfree_skb_any(tx_buf->skb); + break; + case ICE_TX_BUF_XDP_TX: + page_frag_free(tx_buf->raw_buf); + break; + case ICE_TX_BUF_XDP_XMIT: + xdp_return_frame(tx_buf->xdpf); + break; } tx_buf->next_to_watch = NULL; - tx_buf->skb = NULL; + tx_buf->type = ICE_TX_BUF_EMPTY; dma_unmap_len_set(tx_buf, len, 0); /* tx_buf must be completely set up in the transmit path */ } @@ -174,8 +176,6 @@ tx_skip_free: tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1; - tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1; if (!tx_ring->netdev) return; @@ -267,7 +267,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget) DMA_TO_DEVICE); /* clear tx_buf data */ - tx_buf->skb = NULL; + tx_buf->type = ICE_TX_BUF_EMPTY; dma_unmap_len_set(tx_buf, len, 0); /* unmap remaining buffers */ @@ -382,6 +382,7 @@ err: */ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring) { + struct xdp_buff *xdp = &rx_ring->xdp; struct device *dev = rx_ring->dev; u32 size; u16 i; @@ -390,16 +391,16 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring) if (!rx_ring->rx_buf) return; - if (rx_ring->skb) { - dev_kfree_skb(rx_ring->skb); - rx_ring->skb = NULL; - } - if (rx_ring->xsk_pool) { ice_xsk_clean_rx_ring(rx_ring); goto rx_skip_free; } + if (xdp->data) { + xdp_return_buff(xdp); + xdp->data = NULL; + } + /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; @@ -437,6 +438,7 @@ rx_skip_free: rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; + rx_ring->first_desc = 0; rx_ring->next_to_use = 0; } @@ -506,6 +508,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring) rx_ring->next_to_use = 0; rx_ring->next_to_clean = 0; + rx_ring->first_desc = 0; if (ice_is_xdp_ena_vsi(rx_ring->vsi)) WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); @@ -523,8 +526,16 @@ err: return -ENOMEM; } +/** + * ice_rx_frame_truesize - calculate the truesize of an Rx frame + * @rx_ring: ptr to Rx ring + * @size: size of the Rx frame data + * + * Calculate the truesize taking into account the PAGE_SIZE of the + * underlying arch + */ static
unsigned int -ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size) +ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size) { unsigned int truesize; @@ -545,34 +556,39 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused s * @xdp: xdp_buff used as input to the XDP program * @xdp_prog: XDP program to run * @xdp_ring: ring to be used for XDP_TX action + * @rx_buf: Rx buffer to store the XDP action * * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} */ -static int +static void ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, - struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring) + struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, + struct ice_rx_buf *rx_buf) { - int err; + unsigned int ret = ICE_XDP_PASS; u32 act; + if (!xdp_prog) + goto exit; + act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: - return ICE_XDP_PASS; + break; case XDP_TX: if (static_branch_unlikely(&ice_xdp_locking_key)) spin_lock(&xdp_ring->tx_lock); - err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring); + ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false); if (static_branch_unlikely(&ice_xdp_locking_key)) spin_unlock(&xdp_ring->tx_lock); - if (err == ICE_XDP_CONSUMED) + if (ret == ICE_XDP_CONSUMED) goto out_failure; - return err; + break; case XDP_REDIRECT: - err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - if (err) + if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog)) goto out_failure; - return ICE_XDP_REDIR; + ret = ICE_XDP_REDIR; + break; default: bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; @@ -581,8 +597,31 @@ out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_DROP: - return ICE_XDP_CONSUMED; + ret = ICE_XDP_CONSUMED; } +exit: + rx_buf->act = ret; + if (unlikely(xdp_buff_has_frags(xdp))) + ice_set_rx_bufs_act(xdp, rx_ring, ret); +} + +/** + * ice_xmit_xdp_ring - submit frame to XDP ring for transmission + * @xdpf: XDP frame that will be converted to XDP buff + * @xdp_ring: XDP ring for transmission + */ +static int ice_xmit_xdp_ring(const struct xdp_frame *xdpf, + struct ice_tx_ring *xdp_ring) +{ + struct xdp_buff xdp; + + xdp.data_hard_start = (void *)xdpf; + xdp.data = xdpf->data; + xdp.data_end = xdp.data + xdpf->len; + xdp.frame_sz = xdpf->frame_sz; + xdp.flags = xdpf->flags; + + return __ice_xmit_xdp_ring(&xdp, xdp_ring, true); } /** @@ -605,6 +644,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, unsigned int queue_index = smp_processor_id(); struct ice_vsi *vsi = np->vsi; struct ice_tx_ring *xdp_ring; + struct ice_tx_buf *tx_buf; int nxmit = 0, i; if (test_bit(ICE_VSI_DOWN, vsi->state)) @@ -627,16 +667,18 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, xdp_ring = vsi->xdp_rings[queue_index]; } + tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use]; for (i = 0; i < n; i++) { - struct xdp_frame *xdpf = frames[i]; + const struct xdp_frame *xdpf = frames[i]; int err; - err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring); + err = ice_xmit_xdp_ring(xdpf, xdp_ring); if (err != ICE_XDP_TX) break; nxmit++; } + tx_buf->rs_idx = ice_set_rs_bit(xdp_ring); if (unlikely(flags & XDP_XMIT_FLUSH)) ice_xdp_ring_update_tail(xdp_ring); @@ -706,7 +748,7 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi) * buffers. Then bump tail at most one time. Grouping like this lets us avoid * multiple tail writes per call. 
*/ -bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count) +bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count) { union ice_32b_rx_flex_desc *rx_desc; u16 ntu = rx_ring->next_to_use; @@ -783,7 +825,6 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size) /** * ice_can_reuse_rx_page - Determine if page can be reused for another Rx * @rx_buf: buffer containing the page - * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call * * If page is reusable, we have a green light for calling ice_reuse_rx_page, * which will assign the current buffer to the buffer that next_to_alloc is @@ -791,7 +832,7 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size) * page freed */ static bool -ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt) +ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) { unsigned int pagecnt_bias = rx_buf->pagecnt_bias; struct page *page = rx_buf->page; @@ -802,7 +843,7 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt) #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) + if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1)) return false; #else #define ICE_LAST_OFFSET \ @@ -824,33 +865,44 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt) } /** - * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag + * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag * @rx_ring: Rx descriptor ring to transact packets on + * @xdp: xdp buff to place the data into * @rx_buf: buffer containing page to add - * @skb: sk_buff to place the data into * @size: packet length from rx_desc * - * This function will add the data contained in rx_buf->page to the skb. - * It will just attach the page as a frag to the skb. - * The function will then update the page offset. + * This function will add the data contained in rx_buf->page to the xdp buf. + * It will just attach the page as a frag. 
*/ -static void -ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, - struct sk_buff *skb, unsigned int size) +static int +ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, + struct ice_rx_buf *rx_buf, const unsigned int size) { -#if (PAGE_SIZE >= 8192) - unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset); -#else - unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; -#endif + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); if (!size) - return; - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, - rx_buf->page_offset, size, truesize); + return 0; + + if (!xdp_buff_has_frags(xdp)) { + sinfo->nr_frags = 0; + sinfo->xdp_frags_size = 0; + xdp_buff_set_frags_flag(xdp); + } - /* page is being used so we must update the page offset */ - ice_rx_buf_adjust_pg_offset(rx_buf, truesize); + if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { + if (unlikely(xdp_buff_has_frags(xdp))) + ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED); + return -ENOMEM; + } + + __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, + rx_buf->page_offset, size); + sinfo->xdp_frags_size += size; + + if (page_is_pfmemalloc(rx_buf->page)) + xdp_buff_set_frag_pfmemalloc(xdp); + + return 0; } /** @@ -886,19 +938,18 @@ ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf) * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use * @rx_ring: Rx descriptor ring to transact packets on * @size: size of buffer to add to skb - * @rx_buf_pgcnt: rx_buf page refcount * * This function will pull an Rx buffer from the ring and synchronize it * for use by the CPU. */ static struct ice_rx_buf * ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, - int *rx_buf_pgcnt) + const unsigned int ntc) { struct ice_rx_buf *rx_buf; - rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; - *rx_buf_pgcnt = + rx_buf = &rx_ring->rx_buf[ntc]; + rx_buf->pgcnt = #if (PAGE_SIZE < 8192) page_count(rx_buf->page); #else @@ -922,26 +973,25 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, /** * ice_build_skb - Build skb around an existing buffer * @rx_ring: Rx descriptor ring to transact packets on - * @rx_buf: Rx buffer to pull data from * @xdp: xdp_buff pointing to the data * - * This function builds an skb around an existing Rx buffer, taking care - * to set up the skb correctly and avoid any memcpy overhead. + * This function builds an skb around an existing XDP buffer, taking care + * to set up the skb correctly and avoid any memcpy overhead. Driver has + * already combined frags (if any) to skb_shared_info. */ static struct sk_buff * -ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, - struct xdp_buff *xdp) +ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) { u8 metasize = xdp->data - xdp->data_meta; -#if (PAGE_SIZE < 8192) - unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; -#else - unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(xdp->data_end - - xdp->data_hard_start); -#endif + struct skb_shared_info *sinfo = NULL; + unsigned int nr_frags; struct sk_buff *skb; + if (unlikely(xdp_buff_has_frags(xdp))) { + sinfo = xdp_get_shared_info_from_buff(xdp); + nr_frags = sinfo->nr_frags; + } + /* Prefetch first cache line of first page. 
If xdp->data_meta * is unused, this points exactly as xdp->data, otherwise we * likely have a consumer accessing first few bytes of meta @@ -949,7 +999,7 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, */ net_prefetch(xdp->data_meta); /* build an skb around the page buffer */ - skb = napi_build_skb(xdp->data_hard_start, truesize); + skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz); if (unlikely(!skb)) return NULL; @@ -964,8 +1014,11 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, if (metasize) skb_metadata_set(skb, metasize); - /* buffer is used by skb, update page_offset */ - ice_rx_buf_adjust_pg_offset(rx_buf, truesize); + if (unlikely(xdp_buff_has_frags(xdp))) + xdp_update_skb_shared_info(skb, nr_frags, + sinfo->xdp_frags_size, + nr_frags * xdp->frame_sz, + xdp_buff_is_frag_pfmemalloc(xdp)); return skb; } @@ -981,24 +1034,30 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, * skb correctly. */ static struct sk_buff * -ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, - struct xdp_buff *xdp) +ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) { - unsigned int metasize = xdp->data - xdp->data_meta; unsigned int size = xdp->data_end - xdp->data; + struct skb_shared_info *sinfo = NULL; + struct ice_rx_buf *rx_buf; + unsigned int nr_frags = 0; unsigned int headlen; struct sk_buff *skb; /* prefetch first cache line of first page */ - net_prefetch(xdp->data_meta); + net_prefetch(xdp->data); + + if (unlikely(xdp_buff_has_frags(xdp))) { + sinfo = xdp_get_shared_info_from_buff(xdp); + nr_frags = sinfo->nr_frags; + } /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - ICE_RX_HDR_SIZE + metasize, + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) return NULL; + rx_buf = &rx_ring->rx_buf[rx_ring->first_desc]; skb_record_rx_queue(skb, rx_ring->q_index); /* Determine available headroom for copy */ headlen = size; @@ -1006,32 +1065,42 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, - ALIGN(headlen + metasize, sizeof(long))); - - if (metasize) { - skb_metadata_set(skb, metasize); - __skb_pull(skb, metasize); - } + memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, + sizeof(long))); /* if we exhaust the linear part then add what is left as a frag */ size -= headlen; if (size) { -#if (PAGE_SIZE >= 8192) - unsigned int truesize = SKB_DATA_ALIGN(size); -#else - unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; -#endif + /* besides adding here a partial frag, we are going to add + * frags from xdp_buff, make sure there is enough space for + * them + */ + if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) { + dev_kfree_skb(skb); + return NULL; + } skb_add_rx_frag(skb, 0, rx_buf->page, - rx_buf->page_offset + headlen, size, truesize); - /* buffer is used by skb, update page_offset */ - ice_rx_buf_adjust_pg_offset(rx_buf, truesize); + rx_buf->page_offset + headlen, size, + xdp->frame_sz); } else { - /* buffer is unused, reset bias back to rx_buf; data was copied - * onto skb's linear part so there's no need for adjusting - * page offset and we can reuse this buffer as-is + /* buffer is unused, change the act that should be taken later + * on; data was copied 
onto skb's linear part so there's no + * need for adjusting page offset and we can reuse this buffer + * as-is */ - rx_buf->pagecnt_bias++; + rx_buf->act = ICE_SKB_CONSUMED; + } + + if (unlikely(xdp_buff_has_frags(xdp))) { + struct skb_shared_info *skinfo = skb_shinfo(skb); + + memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], + sizeof(skb_frag_t) * nr_frags); + + xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags, + sinfo->xdp_frags_size, + nr_frags * xdp->frame_sz, + xdp_buff_is_frag_pfmemalloc(xdp)); } return skb; @@ -1041,26 +1110,17 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, * ice_put_rx_buf - Clean up used buffer and either recycle or free * @rx_ring: Rx descriptor ring to transact packets on * @rx_buf: Rx buffer to pull data from - * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect() * - * This function will update next_to_clean and then clean up the contents - * of the rx_buf. It will either recycle the buffer or unmap it and free - * the associated resources. + * This function will clean up the contents of the rx_buf. It will either + * recycle the buffer or unmap it and free the associated resources. */ static void -ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, - int rx_buf_pgcnt) +ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf) { - u16 ntc = rx_ring->next_to_clean + 1; - - /* fetch, update, and store next to clean */ - ntc = (ntc < rx_ring->count) ? ntc : 0; - rx_ring->next_to_clean = ntc; - if (!rx_buf) return; - if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) { + if (ice_can_reuse_rx_page(rx_buf)) { /* hand second half of page back to the ring */ ice_reuse_rx_page(rx_ring, rx_buf); } else { @@ -1076,27 +1136,6 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, } /** - * ice_is_non_eop - process handling of non-EOP buffers - * @rx_ring: Rx ring being processed - * @rx_desc: Rx descriptor for current buffer - * - * If the buffer is an EOP buffer, this function exits returning false, - * otherwise return true indicating that this is in fact a non-EOP buffer. 
- */ -static bool -ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc) -{ - /* if we are the last buffer then there is nothing else to do */ -#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) - if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF))) - return false; - - rx_ring->ring_stats->rx_stats.non_eop_descs++; - - return true; -} - -/** * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: Rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process @@ -1110,39 +1149,42 @@ ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc) */ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) { - unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0; - u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); + unsigned int total_rx_bytes = 0, total_rx_pkts = 0; unsigned int offset = rx_ring->rx_offset; + struct xdp_buff *xdp = &rx_ring->xdp; struct ice_tx_ring *xdp_ring = NULL; - unsigned int xdp_res, xdp_xmit = 0; - struct sk_buff *skb = rx_ring->skb; struct bpf_prog *xdp_prog = NULL; - struct xdp_buff xdp; + u32 ntc = rx_ring->next_to_clean; + u32 cnt = rx_ring->count; + u32 cached_ntc = ntc; + u32 xdp_xmit = 0; + u32 cached_ntu; bool failure; + u32 first; /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ #if (PAGE_SIZE < 8192) - frame_sz = ice_rx_frame_truesize(rx_ring, 0); + xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0); #endif - xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); xdp_prog = READ_ONCE(rx_ring->xdp_prog); - if (xdp_prog) + if (xdp_prog) { xdp_ring = rx_ring->xdp_ring; + cached_ntu = xdp_ring->next_to_use; + } /* start the loop to process Rx packets bounded by 'budget' */ while (likely(total_rx_pkts < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; struct ice_rx_buf *rx_buf; - unsigned char *hard_start; + struct sk_buff *skb; unsigned int size; u16 stat_err_bits; - int rx_buf_pgcnt; u16 vlan_tag = 0; u16 rx_ptype; /* get the Rx desc from Rx ring based on 'next_to_clean' */ - rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); + rx_desc = ICE_RX_DESC(rx_ring, ntc); /* status_error_len will always be zero for unused descriptors * because it's cleared in cleanup, and overlaps with hdr_addr @@ -1166,8 +1208,8 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) if (rx_desc->wb.rxdid == FDIR_DESC_RXDID && ctrl_vsi->vf) ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc); - ice_put_rx_buf(rx_ring, NULL, 0); - cleaned_count++; + if (++ntc == cnt) + ntc = 0; continue; } @@ -1175,65 +1217,56 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) ICE_RX_FLX_DESC_PKT_LEN_M; /* retrieve a buffer from the ring */ - rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt); + rx_buf = ice_get_rx_buf(rx_ring, size, ntc); - if (!size) { - xdp.data = NULL; - xdp.data_end = NULL; - xdp.data_hard_start = NULL; - xdp.data_meta = NULL; - goto construct_skb; - } + if (!xdp->data) { + void *hard_start; - hard_start = page_address(rx_buf->page) + rx_buf->page_offset - - offset; - xdp_prepare_buff(&xdp, hard_start, offset, size, true); + hard_start = page_address(rx_buf->page) + rx_buf->page_offset - + offset; + xdp_prepare_buff(xdp, hard_start, offset, size, !!offset); #if (PAGE_SIZE > 4096) - /* At larger PAGE_SIZE, frame_sz depend on len size */ - xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size); + /* At larger PAGE_SIZE, frame_sz depend on len size */ + xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size); 
#endif + xdp_buff_clear_frags_flag(xdp); + } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { + break; + } + if (++ntc == cnt) + ntc = 0; - if (!xdp_prog) - goto construct_skb; + /* skip if it is NOP desc */ + if (ice_is_non_eop(rx_ring, rx_desc)) + continue; - xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring); - if (!xdp_res) + ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf); + if (rx_buf->act == ICE_XDP_PASS) goto construct_skb; - if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { - xdp_xmit |= xdp_res; - ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz); - } else { - rx_buf->pagecnt_bias++; - } - total_rx_bytes += size; + total_rx_bytes += xdp_get_buff_len(xdp); total_rx_pkts++; - cleaned_count++; - ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); + xdp->data = NULL; + rx_ring->first_desc = ntc; continue; construct_skb: - if (skb) { - ice_add_rx_frag(rx_ring, rx_buf, skb, size); - } else if (likely(xdp.data)) { - if (ice_ring_uses_build_skb(rx_ring)) - skb = ice_build_skb(rx_ring, rx_buf, &xdp); - else - skb = ice_construct_skb(rx_ring, rx_buf, &xdp); - } + if (likely(ice_ring_uses_build_skb(rx_ring))) + skb = ice_build_skb(rx_ring, xdp); + else + skb = ice_construct_skb(rx_ring, xdp); /* exit if we failed to retrieve a buffer */ if (!skb) { - rx_ring->ring_stats->rx_stats.alloc_buf_failed++; - if (rx_buf) - rx_buf->pagecnt_bias++; + rx_ring->ring_stats->rx_stats.alloc_page_failed++; + rx_buf->act = ICE_XDP_CONSUMED; + if (unlikely(xdp_buff_has_frags(xdp))) + ice_set_rx_bufs_act(xdp, rx_ring, + ICE_XDP_CONSUMED); + xdp->data = NULL; + rx_ring->first_desc = ntc; break; } - - ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); - cleaned_count++; - - /* skip if it is NOP desc */ - if (ice_is_non_eop(rx_ring, rx_desc)) - continue; + xdp->data = NULL; + rx_ring->first_desc = ntc; stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); if (unlikely(ice_test_staterr(rx_desc->wb.status_error0, @@ -1245,10 +1278,8 @@ construct_skb: vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc); /* pad the skb if needed, to make a valid ethernet frame */ - if (eth_skb_pad(skb)) { - skb = NULL; + if (eth_skb_pad(skb)) continue; - } /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; @@ -1262,18 +1293,34 @@ construct_skb: ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb); /* send completed skb up the stack */ ice_receive_skb(rx_ring, skb, vlan_tag); - skb = NULL; /* update budget accounting */ total_rx_pkts++; } + first = rx_ring->first_desc; + while (cached_ntc != first) { + struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc]; + + if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) { + ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); + xdp_xmit |= buf->act; + } else if (buf->act & ICE_XDP_CONSUMED) { + buf->pagecnt_bias++; + } else if (buf->act == ICE_XDP_PASS) { + ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); + } + + ice_put_rx_buf(rx_ring, buf); + if (++cached_ntc >= cnt) + cached_ntc = 0; + } + rx_ring->next_to_clean = ntc; /* return up to cleaned_count buffers to hardware */ - failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); + failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)); - if (xdp_prog) - ice_finalize_xdp_rx(xdp_ring, xdp_xmit); - rx_ring->skb = skb; + if (xdp_xmit) + ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu); if (rx_ring->ring_stats) ice_update_rx_ring_stats(rx_ring, total_rx_pkts, @@ -1682,6 +1729,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, DMA_TO_DEVICE); tx_buf = &tx_ring->tx_buf[i]; + tx_buf->type = 
ICE_TX_BUF_FRAG; } /* record SW timestamp if HW timestamp is not available */ @@ -1996,7 +2044,6 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) if (err < 0) return err; - /* cppcheck-suppress unreadVariable */ protocol = vlan_get_protocol(skb); if (eth_p_mpls(protocol)) @@ -2033,8 +2080,6 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) } /* reset pointers to inner headers */ - - /* cppcheck-suppress unreadVariable */ ip.hdr = skb_inner_network_header(skb); l4.hdr = skb_inner_transport_header(skb); @@ -2300,6 +2345,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) ice_trace(xmit_frame_ring, tx_ring, skb); + if (unlikely(ipv6_hopopt_jumbo_remove(skb))) + goto out_drop; + count = ice_xmit_desc_count(skb); if (ice_chk_linearize(skb, count)) { if (__skb_linearize(skb)) @@ -2328,6 +2376,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_buf[tx_ring->next_to_use]; first->skb = skb; + first->type = ICE_TX_BUF_SKB; first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); first->gso_segs = 1; first->tx_flags = 0; @@ -2500,11 +2549,11 @@ void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring) dma_unmap_addr(tx_buf, dma), dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); - if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) + if (tx_buf->type == ICE_TX_BUF_DUMMY) devm_kfree(tx_ring->dev, tx_buf->raw_buf); /* clear next_to_watch to prevent false hangs */ - tx_buf->raw_buf = NULL; + tx_buf->type = ICE_TX_BUF_EMPTY; tx_buf->tx_flags = 0; tx_buf->next_to_watch = NULL; dma_unmap_len_set(tx_buf, len, 0); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 4fd0e5d0a313..fff0efe28373 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -9,10 +9,12 @@ #define ICE_DFLT_IRQ_WORK 256 #define ICE_RXBUF_3072 3072 #define ICE_RXBUF_2048 2048 +#define ICE_RXBUF_1664 1664 #define ICE_RXBUF_1536 1536 #define ICE_MAX_CHAINED_RX_BUFS 5 #define ICE_MAX_BUF_TXD 8 #define ICE_MIN_TX_LEN 17 +#define ICE_MAX_FRAME_LEGACY_RX 8320 /* The size limit for a transmit buffer in a descriptor is (16K - 1). * In order to align with the read requests we will align the value to @@ -110,15 +112,16 @@ static inline int ice_skb_pad(void) (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ (R)->next_to_clean - (R)->next_to_use - 1) +#define ICE_RX_DESC_UNUSED(R) \ + ((((R)->first_desc > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->first_desc - (R)->next_to_use - 1) + #define ICE_RING_QUARTER(R) ((R)->count >> 2) #define ICE_TX_FLAGS_TSO BIT(0) #define ICE_TX_FLAGS_HW_VLAN BIT(1) #define ICE_TX_FLAGS_SW_VLAN BIT(2) -/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be - * freed instead of returned like skb packets. 
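The type field assigned above (see the ice_tx_buf_type enum further down in ice_txrx.h) replaces flag-bit tests such as ICE_TX_FLAGS_DUMMY_PKT with a single dispatch on completion. A plain-C sketch of that shape — enum values and actions are stand-ins, not the driver's:

#include <stdio.h>

enum buf_type { BUF_EMPTY, BUF_DUMMY, BUF_SKB };

static void complete_buf(enum buf_type t)
{
        switch (t) {
        case BUF_DUMMY:
                puts("free raw buffer");           /* devm_kfree() in the diff */
                break;
        case BUF_SKB:
                puts("consume skb, update stats");
                break;
        case BUF_EMPTY:
                break;                             /* nothing to do */
        }
}

int main(void)
{
        complete_buf(BUF_DUMMY);
        complete_buf(BUF_SKB);
        return 0;
}

One enum also gives the compiler a chance to warn on an unhandled buffer kind, which flag masks never could.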
- */ -#define ICE_TX_FLAGS_DUMMY_PKT BIT(3) +/* Free, was ICE_TX_FLAGS_DUMMY_PKT */ #define ICE_TX_FLAGS_TSYN BIT(4) #define ICE_TX_FLAGS_IPV4 BIT(5) #define ICE_TX_FLAGS_IPV6 BIT(6) @@ -134,6 +137,7 @@ static inline int ice_skb_pad(void) #define ICE_XDP_TX BIT(1) #define ICE_XDP_REDIR BIT(2) #define ICE_XDP_EXIT BIT(3) +#define ICE_SKB_CONSUMED ICE_XDP_CONSUMED #define ICE_RX_DMA_ATTR \ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) @@ -142,15 +146,44 @@ static inline int ice_skb_pad(void) #define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS) +/** + * enum ice_tx_buf_type - type of &ice_tx_buf to act on Tx completion + * @ICE_TX_BUF_EMPTY: unused OR XSk frame, no action required + * @ICE_TX_BUF_DUMMY: dummy Flow Director packet, unmap and kfree() + * @ICE_TX_BUF_FRAG: mapped skb OR &xdp_buff frag, only unmap DMA + * @ICE_TX_BUF_SKB: &sk_buff, unmap and consume_skb(), update stats + * @ICE_TX_BUF_XDP_TX: &xdp_buff, unmap and page_frag_free(), stats + * @ICE_TX_BUF_XDP_XMIT: &xdp_frame, unmap and xdp_return_frame(), stats + * @ICE_TX_BUF_XSK_TX: &xdp_buff on XSk queue, xsk_buff_free(), stats + */ +enum ice_tx_buf_type { + ICE_TX_BUF_EMPTY = 0U, + ICE_TX_BUF_DUMMY, + ICE_TX_BUF_FRAG, + ICE_TX_BUF_SKB, + ICE_TX_BUF_XDP_TX, + ICE_TX_BUF_XDP_XMIT, + ICE_TX_BUF_XSK_TX, +}; + struct ice_tx_buf { - struct ice_tx_desc *next_to_watch; union { - struct sk_buff *skb; - void *raw_buf; /* used for XDP */ + struct ice_tx_desc *next_to_watch; + u32 rs_idx; + }; + union { + void *raw_buf; /* used for XDP_TX and FDir rules */ + struct sk_buff *skb; /* used for .ndo_start_xmit() */ + struct xdp_frame *xdpf; /* used for .ndo_xdp_xmit() */ + struct xdp_buff *xdp; /* used for XDP_TX ZC */ }; unsigned int bytecount; - unsigned short gso_segs; - u32 tx_flags; + union { + unsigned int gso_segs; + unsigned int nr_frags; /* used for mbuf XDP */ + }; + u32 type:16; /* &ice_tx_buf_type */ + u32 tx_flags:16; DEFINE_DMA_UNMAP_LEN(len); DEFINE_DMA_UNMAP_ADDR(dma); }; @@ -170,7 +203,9 @@ struct ice_rx_buf { dma_addr_t dma; struct page *page; unsigned int page_offset; - u16 pagecnt_bias; + unsigned int pgcnt; + unsigned int act; + unsigned int pagecnt_bias; }; struct ice_q_stats { @@ -273,42 +308,44 @@ struct ice_rx_ring { struct ice_vsi *vsi; /* Backreference to associated VSI */ struct ice_q_vector *q_vector; /* Backreference to associated vector */ u8 __iomem *tail; + u16 q_index; /* Queue number of ring */ + + u16 count; /* Number of descriptors */ + u16 reg_idx; /* HW register index of the ring */ + u16 next_to_alloc; + /* CL2 - 2nd cacheline starts here */ union { struct ice_rx_buf *rx_buf; struct xdp_buff **xdp_buf; }; - /* CL2 - 2nd cacheline starts here */ - struct xdp_rxq_info xdp_rxq; + struct xdp_buff xdp; /* CL3 - 3rd cacheline starts here */ - u16 q_index; /* Queue number of ring */ - - u16 count; /* Number of descriptors */ - u16 reg_idx; /* HW register index of the ring */ + struct bpf_prog *xdp_prog; + u16 rx_offset; /* used in interrupt processing */ u16 next_to_use; u16 next_to_clean; - u16 next_to_alloc; - u16 rx_offset; - u16 rx_buf_len; + u16 first_desc; /* stats structs */ struct ice_ring_stats *ring_stats; struct rcu_head rcu; /* to avoid race on free */ - /* CL4 - 3rd cacheline starts here */ + /* CL4 - 4th cacheline starts here */ struct ice_channel *ch; - struct bpf_prog *xdp_prog; struct ice_tx_ring *xdp_ring; struct xsk_buff_pool *xsk_pool; - struct sk_buff *skb; dma_addr_t dma; /* physical address of ring */ u64 cached_phctime; + u16 rx_buf_len; u8 dcb_tc; /* Traffic 
class of ring */ u8 ptp_rx; #define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1) #define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2) u8 flags; + /* CL5 - 5th cacheline starts here */ + struct xdp_rxq_info xdp_rxq; } ____cacheline_internodealigned_in_smp; struct ice_tx_ring { @@ -326,12 +363,11 @@ struct ice_tx_ring { struct xsk_buff_pool *xsk_pool; u16 next_to_use; u16 next_to_clean; - u16 next_rs; - u16 next_dd; u16 q_handle; /* Queue handle per TC */ u16 reg_idx; /* HW register index of the ring */ u16 count; /* Number of descriptors */ u16 q_index; /* Queue number of ring */ + u16 xdp_tx_active; /* stats structs */ struct ice_ring_stats *ring_stats; /* CL3 - 3rd cacheline starts here */ @@ -342,7 +378,6 @@ struct ice_tx_ring { spinlock_t tx_lock; u32 txq_teid; /* Added Tx queue TEID */ /* CL4 - 4th cacheline starts here */ - u16 xdp_tx_active; #define ICE_TX_FLAGS_RING_XDP BIT(0) #define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1) #define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2) @@ -431,7 +466,7 @@ static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring) union ice_32b_rx_flex_desc; -bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count); +bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count); netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev); u16 ice_select_queue(struct net_device *dev, struct sk_buff *skb, diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 25f04266c668..7bc5aa340c7d 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -221,128 +221,217 @@ ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) } /** + * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer + * @dev: device for DMA mapping + * @tx_buf: Tx buffer to clean + * @bq: XDP bulk flush struct + */ +static void +ice_clean_xdp_tx_buf(struct device *dev, struct ice_tx_buf *tx_buf, + struct xdp_frame_bulk *bq) +{ + dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + + switch (tx_buf->type) { + case ICE_TX_BUF_XDP_TX: + page_frag_free(tx_buf->raw_buf); + break; + case ICE_TX_BUF_XDP_XMIT: + xdp_return_frame_bulk(tx_buf->xdpf, bq); + break; + } + + tx_buf->type = ICE_TX_BUF_EMPTY; +} + +/** * ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring * @xdp_ring: XDP ring to clean */ -static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring) +static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring) { - unsigned int total_bytes = 0, total_pkts = 0; - u16 tx_thresh = ICE_RING_QUARTER(xdp_ring); - u16 ntc = xdp_ring->next_to_clean; - struct ice_tx_desc *next_dd_desc; - u16 next_dd = xdp_ring->next_dd; - struct ice_tx_buf *tx_buf; - int i; + int total_bytes = 0, total_pkts = 0; + struct device *dev = xdp_ring->dev; + u32 ntc = xdp_ring->next_to_clean; + struct ice_tx_desc *tx_desc; + u32 cnt = xdp_ring->count; + struct xdp_frame_bulk bq; + u32 frags, xdp_tx = 0; + u32 ready_frames = 0; + u32 idx; + u32 ret; + + idx = xdp_ring->tx_buf[ntc].rs_idx; + tx_desc = ICE_TX_DESC(xdp_ring, idx); + if (tx_desc->cmd_type_offset_bsz & + cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) { + if (idx >= ntc) + ready_frames = idx - ntc + 1; + else + ready_frames = idx + cnt - ntc + 1; + } - next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd); - if (!(next_dd_desc->cmd_type_offset_bsz & - cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) - return; + if 
(unlikely(!ready_frames)) + return 0; + ret = ready_frames; + + xdp_frame_bulk_init(&bq); + rcu_read_lock(); /* xdp_return_frame_bulk() */ - for (i = 0; i < tx_thresh; i++) { - tx_buf = &xdp_ring->tx_buf[ntc]; + while (ready_frames) { + struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc]; + struct ice_tx_buf *head = tx_buf; + /* bytecount holds size of head + frags */ total_bytes += tx_buf->bytecount; - /* normally tx_buf->gso_segs was taken but at this point - * it's always 1 for us - */ + frags = tx_buf->nr_frags; total_pkts++; - - page_frag_free(tx_buf->raw_buf); - dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma), - dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); - dma_unmap_len_set(tx_buf, len, 0); - tx_buf->raw_buf = NULL; + /* count head + frags */ + ready_frames -= frags + 1; + xdp_tx++; ntc++; - if (ntc >= xdp_ring->count) + if (ntc == cnt) ntc = 0; + + for (int i = 0; i < frags; i++) { + tx_buf = &xdp_ring->tx_buf[ntc]; + + ice_clean_xdp_tx_buf(dev, tx_buf, &bq); + ntc++; + if (ntc == cnt) + ntc = 0; + } + + ice_clean_xdp_tx_buf(dev, head, &bq); } - next_dd_desc->cmd_type_offset_bsz = 0; - xdp_ring->next_dd = xdp_ring->next_dd + tx_thresh; - if (xdp_ring->next_dd > xdp_ring->count) - xdp_ring->next_dd = tx_thresh - 1; + xdp_flush_frame_bulk(&bq); + rcu_read_unlock(); + + tx_desc->cmd_type_offset_bsz = 0; xdp_ring->next_to_clean = ntc; + xdp_ring->xdp_tx_active -= xdp_tx; ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes); + + return ret; } /** - * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission - * @data: packet data pointer - * @size: packet data size + * __ice_xmit_xdp_ring - submit frame to XDP ring for transmission + * @xdp: XDP buffer to be placed onto Tx descriptors * @xdp_ring: XDP ring for transmission + * @frame: whether this comes from .ndo_xdp_xmit() */ -int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring) +int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring, + bool frame) { - u16 tx_thresh = ICE_RING_QUARTER(xdp_ring); - u16 i = xdp_ring->next_to_use; + struct skb_shared_info *sinfo = NULL; + u32 size = xdp->data_end - xdp->data; + struct device *dev = xdp_ring->dev; + u32 ntu = xdp_ring->next_to_use; struct ice_tx_desc *tx_desc; + struct ice_tx_buf *tx_head; struct ice_tx_buf *tx_buf; - dma_addr_t dma; + u32 cnt = xdp_ring->count; + void *data = xdp->data; + u32 nr_frags = 0; + u32 free_space; + u32 frag = 0; + + free_space = ICE_DESC_UNUSED(xdp_ring); + if (free_space < ICE_RING_QUARTER(xdp_ring)) + free_space += ice_clean_xdp_irq(xdp_ring); + + if (unlikely(!free_space)) + goto busy; + + if (unlikely(xdp_buff_has_frags(xdp))) { + sinfo = xdp_get_shared_info_from_buff(xdp); + nr_frags = sinfo->nr_frags; + if (free_space < nr_frags + 1) + goto busy; + } - if (ICE_DESC_UNUSED(xdp_ring) < tx_thresh) - ice_clean_xdp_irq(xdp_ring); + tx_desc = ICE_TX_DESC(xdp_ring, ntu); + tx_head = &xdp_ring->tx_buf[ntu]; + tx_buf = tx_head; - if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) { - xdp_ring->ring_stats->tx_stats.tx_busy++; - return ICE_XDP_CONSUMED; - } + for (;;) { + dma_addr_t dma; - dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE); - if (dma_mapping_error(xdp_ring->dev, dma)) - return ICE_XDP_CONSUMED; + dma = dma_map_single(dev, data, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) + goto dma_unmap; - tx_buf = &xdp_ring->tx_buf[i]; - tx_buf->bytecount = size; - tx_buf->gso_segs = 1; - tx_buf->raw_buf = data; + /* record length, and DMA address */ + dma_unmap_len_set(tx_buf, 
len, size); + dma_unmap_addr_set(tx_buf, dma, dma); - /* record length, and DMA address */ - dma_unmap_len_set(tx_buf, len, size); - dma_unmap_addr_set(tx_buf, dma, dma); + if (frame) { + tx_buf->type = ICE_TX_BUF_FRAG; + } else { + tx_buf->type = ICE_TX_BUF_XDP_TX; + tx_buf->raw_buf = data; + } - tx_desc = ICE_TX_DESC(xdp_ring, i); - tx_desc->buf_addr = cpu_to_le64(dma); - tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0, - size, 0); + tx_desc->buf_addr = cpu_to_le64(dma); + tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0); - xdp_ring->xdp_tx_active++; - i++; - if (i == xdp_ring->count) { - i = 0; - tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs); - tx_desc->cmd_type_offset_bsz |= - cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S); - xdp_ring->next_rs = tx_thresh - 1; + ntu++; + if (ntu == cnt) + ntu = 0; + + if (frag == nr_frags) + break; + + tx_desc = ICE_TX_DESC(xdp_ring, ntu); + tx_buf = &xdp_ring->tx_buf[ntu]; + + data = skb_frag_address(&sinfo->frags[frag]); + size = skb_frag_size(&sinfo->frags[frag]); + frag++; } - xdp_ring->next_to_use = i; - if (i > xdp_ring->next_rs) { - tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs); - tx_desc->cmd_type_offset_bsz |= - cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S); - xdp_ring->next_rs += tx_thresh; + /* store info about bytecount and frag count in first desc */ + tx_head->bytecount = xdp_get_buff_len(xdp); + tx_head->nr_frags = nr_frags; + + if (frame) { + tx_head->type = ICE_TX_BUF_XDP_XMIT; + tx_head->xdpf = xdp->data_hard_start; } + /* update last descriptor from a frame with EOP */ + tx_desc->cmd_type_offset_bsz |= + cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S); + + xdp_ring->xdp_tx_active++; + xdp_ring->next_to_use = ntu; + return ICE_XDP_TX; -} -/** - * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it - * @xdp: XDP buffer - * @xdp_ring: XDP Tx ring - * - * Returns negative on failure, 0 on success. - */ -int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring) -{ - struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); +dma_unmap: + for (;;) { + tx_buf = &xdp_ring->tx_buf[ntu]; + dma_unmap_page(dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + if (tx_buf == tx_head) + break; + + if (!ntu) + ntu += cnt; + ntu--; + } + return ICE_XDP_CONSUMED; - if (unlikely(!xdpf)) - return ICE_XDP_CONSUMED; +busy: + xdp_ring->ring_stats->tx_stats.tx_busy++; - return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring); + return ICE_XDP_CONSUMED; } /** @@ -354,14 +443,21 @@ int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring) * should be called when a batch of packets has been processed in the * napi loop. 
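On a mapping failure mid-chain, the dma_unmap path above walks backwards from the last produced slot to the chain head, and the `if (!ntu) ntu += cnt; ntu--;` pair is the ring-safe decrement. A toy index-only model of that unwind, with made-up ring numbers and prints in place of real DMA unmaps:

#include <stdio.h>

int main(void)
{
        unsigned int cnt = 8;       /* ring size */
        unsigned int head = 6;      /* first slot of the chain */
        unsigned int produced = 3;  /* head + two frags mapped so far */
        unsigned int ntu = (head + produced - 1) % cnt; /* last mapped slot */

        for (;;) {
                printf("unmap slot %u\n", ntu);
                if (ntu == head)
                        break;
                if (!ntu)
                        ntu += cnt; /* step backwards across slot 0 */
                ntu--;
        }
        return 0;
}

With head = 6 and three slots produced the walk visits 0, 7, 6, i.e. it correctly crosses the wrap point.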
*/ -void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res) +void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, + u32 first_idx) { + struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx]; + if (xdp_res & ICE_XDP_REDIR) xdp_do_flush_map(); if (xdp_res & ICE_XDP_TX) { if (static_branch_unlikely(&ice_xdp_locking_key)) spin_lock(&xdp_ring->tx_lock); + /* store index of descriptor with RS bit set in the first + * ice_tx_buf of given NAPI batch + */ + tx_buf->rs_idx = ice_set_rs_bit(xdp_ring); ice_xdp_ring_update_tail(xdp_ring); if (static_branch_unlikely(&ice_xdp_locking_key)) spin_unlock(&xdp_ring->tx_lock); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h index c7d2954dc9ea..115969ecdf7b 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h @@ -6,6 +6,36 @@ #include "ice.h" /** + * ice_set_rx_bufs_act - propagate Rx buffer action to frags + * @xdp: XDP buffer representing frame (linear and frags part) + * @rx_ring: Rx ring struct + * @act: action to store onto Rx buffers related to XDP buffer parts + * + * Set action that should be taken before putting Rx buffer from first frag + * to one before last. Last one is handled by caller of this function as it + * is the EOP frag that is currently being processed. This function is + * supposed to be called only when XDP buffer contains frags. + */ +static inline void +ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring, + const unsigned int act) +{ + const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + u32 first = rx_ring->first_desc; + u32 nr_frags = sinfo->nr_frags; + u32 cnt = rx_ring->count; + struct ice_rx_buf *buf; + + for (int i = 0; i < nr_frags; i++) { + buf = &rx_ring->rx_buf[first]; + buf->act = act; + + if (++first == cnt) + first = 0; + } +} + +/** * ice_test_staterr - tests bits in Rx descriptor status and error fields * @status_err_n: Rx descriptor status_error0 or status_error1 bits * @stat_err_bits: value to mask @@ -21,6 +51,28 @@ ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits) return !!(status_err_n & cpu_to_le16(stat_err_bits)); } +/** + * ice_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * If the buffer is an EOP buffer, this function exits returning false, + * otherwise return true indicating that this is in fact a non-EOP buffer.
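Given that contract — false on EOP, true plus a non_eop_descs bump otherwise — the Rx loop keeps pulling descriptors for the same frame until the EOF status bit shows up. A self-contained model of the walk, with a fake status array standing in for descriptor writebacks:

#include <stdio.h>

#define EOP_BIT 0x1

int main(void)
{
        unsigned int status[] = { 0, 0, EOP_BIT }; /* one frame, 3 buffers */
        unsigned int ntc = 0, cnt = 3, non_eop = 0;

        while (!(status[ntc] & EOP_BIT)) {
                non_eop++;          /* mirrors non_eop_descs++ */
                if (++ntc == cnt)
                        ntc = 0;
        }
        printf("frame ends at desc %u after %u non-EOP descs\n", ntc, non_eop);
        return 0;
}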
+ */ +static inline bool +ice_is_non_eop(const struct ice_rx_ring *rx_ring, + const union ice_32b_rx_flex_desc *rx_desc) +{ + /* if we are the last buffer then there is nothing else to do */ +#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) + if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF))) + return false; + + rx_ring->ring_stats->rx_stats.non_eop_descs++; + + return true; +} + static inline __le64 ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) { @@ -70,9 +122,28 @@ static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring) writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); } -void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res); +/** + * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU) + * @xdp_ring: XDP ring to produce the HW Tx descriptors on + * + * Returns the index of the descriptor on which the RS bit was set + */ +static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring) +{ + u32 rs_idx = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1; + struct ice_tx_desc *tx_desc; + + tx_desc = ICE_TX_DESC(xdp_ring, rs_idx); + tx_desc->cmd_type_offset_bsz |= + cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S); + + return rs_idx; +} + +void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx); int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring); -int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring); +int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring, + bool frame); void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val); void ice_process_skb_fields(struct ice_rx_ring *rx_ring, diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 375eb6493f0f..0e57bd1b85fd 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -237,16 +237,49 @@ static void ice_vf_clear_counters(struct ice_vf *vf) */ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf) { + /* Close any IRQ mapping now */ + if (vf->vf_ops->irq_close) + vf->vf_ops->irq_close(vf); + ice_vf_clear_counters(vf); vf->vf_ops->clear_reset_trigger(vf); } /** + * ice_vf_recreate_vsi - Release and re-create the VF's VSI + * @vf: VF to recreate the VSI for + * + * This is only called when a single VF is being reset (i.e. VFR, VFLR, host + * VF configuration change, etc.) + * + * It releases and then re-creates a new VSI. + */ +static int ice_vf_recreate_vsi(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + int err; + + ice_vf_vsi_release(vf); + + err = vf->vf_ops->create_vsi(vf); + if (err) { + dev_err(ice_pf_to_dev(pf), + "Failed to recreate the VF%u's VSI, error %d\n", + vf->vf_id, err); + return err; + } + + return 0; +} + +/** * ice_vf_rebuild_vsi - rebuild the VF's VSI * @vf: VF to rebuild the VSI for * * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the * host, PFR, CORER, etc.). + * + * It reprograms the VSI configuration back into hardware.
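ice_set_rs_bit() above reduces descriptor writeback traffic to one RS-tagged slot per NAPI batch, and that slot is simply "one behind next_to_use" with wraparound. The index math in isolation:

#include <stdio.h>

static unsigned int rs_index(unsigned int next_to_use, unsigned int count)
{
        return next_to_use ? next_to_use - 1 : count - 1;
}

int main(void)
{
        printf("%u\n", rs_index(5, 512)); /* 4 */
        printf("%u\n", rs_index(0, 512)); /* 511: wrapped to ring end */
        return 0;
}

Storing that index in the batch's first ice_tx_buf (rs_idx) is what later lets ice_clean_xdp_irq() find the completion point without scanning every descriptor.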
*/ static int ice_vf_rebuild_vsi(struct ice_vf *vf) { @@ -256,7 +289,7 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf) if (WARN_ON(!vsi)) return -EINVAL; - if (ice_vsi_rebuild(vsi, true)) { + if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) { dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n", vf->vf_id); return -EIO; @@ -271,6 +304,21 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf) } /** + * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild + * @vf: the VF being reset + * + * Perform reset tasks which must occur after the VSI has been re-created or + * rebuilt during a VF reset. + */ +static void ice_vf_post_vsi_rebuild(struct ice_vf *vf) +{ + ice_vf_rebuild_host_cfg(vf); + ice_vf_set_initialized(vf); + + vf->vf_ops->post_vsi_rebuild(vf); +} + +/** * ice_is_any_vf_in_unicast_promisc - check if any VF(s) * are in unicast promiscuous mode * @pf: PF structure for accessing VF(s) @@ -495,7 +543,7 @@ void ice_reset_all_vfs(struct ice_pf *pf) ice_vf_pre_vsi_rebuild(vf); ice_vf_rebuild_vsi(vf); - vf->vf_ops->post_vsi_rebuild(vf); + ice_vf_post_vsi_rebuild(vf); mutex_unlock(&vf->cfg_lock); } @@ -639,14 +687,14 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) ice_vf_pre_vsi_rebuild(vf); - if (vf->vf_ops->vsi_rebuild(vf)) { + if (ice_vf_recreate_vsi(vf)) { dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id); err = -EFAULT; goto out_unlock; } - vf->vf_ops->post_vsi_rebuild(vf); + ice_vf_post_vsi_rebuild(vf); vsi = ice_get_vf_vsi(vf); if (WARN_ON(!vsi)) { err = -EINVAL; @@ -673,7 +721,7 @@ out_unlock: * ice_set_vf_state_qs_dis - Set VF queues state to disabled * @vf: pointer to the VF structure */ -void ice_set_vf_state_qs_dis(struct ice_vf *vf) +static void ice_set_vf_state_qs_dis(struct ice_vf *vf) { /* Clear Rx/Tx enabled queues flag */ bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF); @@ -681,9 +729,45 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf) clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); } +/** + * ice_set_vf_state_dis - Set VF state to disabled + * @vf: pointer to the VF structure + */ +void ice_set_vf_state_dis(struct ice_vf *vf) +{ + ice_set_vf_state_qs_dis(vf); + vf->vf_ops->clear_reset_state(vf); +} + /* Private functions only accessed from other virtualization files */ /** + * ice_initialize_vf_entry - Initialize a VF entry + * @vf: pointer to the VF structure + */ +void ice_initialize_vf_entry(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vfs *vfs; + + vfs = &pf->vfs; + + /* assign default capabilities */ + vf->spoofchk = true; + vf->num_vf_qs = vfs->num_qps_per; + ice_vc_set_default_allowlist(vf); + ice_virtchnl_set_dflt_ops(vf); + + /* ctrl_vsi_idx will be set to a valid value only when iAVF + * creates its first fdir rule. 
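Taken together, the helpers above give a single-VF reset three fixed phases: pre-rebuild (close IRQs, clear counters), recreate the VSI, then post-rebuild (restore host config, mark initialized). A stubbed sketch of the ordering — the three functions below are stand-ins with the real bodies reduced to prints:

#include <stdio.h>

static void pre_vsi_rebuild(void)  { puts("irq_close + clear counters"); }
static int  recreate_vsi(void)     { puts("release + create_vsi"); return 0; }
static void post_vsi_rebuild(void) { puts("host cfg + set initialized"); }

int main(void)
{
        pre_vsi_rebuild();
        if (recreate_vsi())
                return 1;  /* on failure, post work is skipped, as in the diff */
        post_vsi_rebuild();
        return 0;
}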
+ */ + ice_vf_ctrl_invalidate_vsi(vf); + ice_vf_fdir_init(vf); + + mutex_init(&vf->cfg_lock); +} + +/** * ice_dis_vf_qs - Disable the VF queues * @vf: pointer to the VF structure */ @@ -924,18 +1008,18 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) vf->num_mac++; - if (is_valid_ether_addr(vf->hw_lan_addr.addr)) { - status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr, + if (is_valid_ether_addr(vf->hw_lan_addr)) { + status = ice_fltr_add_mac(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI); if (status) { dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n", - &vf->hw_lan_addr.addr[0], vf->vf_id, + &vf->hw_lan_addr[0], vf->vf_id, status); return status; } vf->num_mac++; - ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr); + ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr); } return 0; @@ -1115,11 +1199,16 @@ void ice_vf_ctrl_vsi_release(struct ice_vf *vf) */ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) { - struct ice_port_info *pi = ice_vf_get_port_info(vf); + struct ice_vsi_cfg_params params = {}; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; - vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL); + params.type = ICE_VSI_CTRL; + params.pi = ice_vf_get_port_info(vf); + params.vf = vf; + params.flags = ICE_VSI_FLAG_INIT; + + vsi = ice_vsi_setup(pf, ¶ms); if (!vsi) { dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n"); ice_vf_ctrl_invalidate_vsi(vf); @@ -1129,6 +1218,60 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) } /** + * ice_vf_init_host_cfg - Initialize host admin configuration + * @vf: VF to initialize + * @vsi: the VSI created at initialization + * + * Initialize the VF host configuration. Called during VF creation to setup + * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It + * should only be called during VF creation. + */ +int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi) +{ + struct ice_vsi_vlan_ops *vlan_ops; + struct ice_pf *pf = vf->pf; + u8 broadcast[ETH_ALEN]; + struct device *dev; + int err; + + dev = ice_pf_to_dev(pf); + + err = ice_vsi_add_vlan_zero(vsi); + if (err) { + dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n", + vf->vf_id); + return err; + } + + vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); + err = vlan_ops->ena_rx_filtering(vsi); + if (err) { + dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n", + vf->vf_id); + return err; + } + + eth_broadcast_addr(broadcast); + err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); + if (err) { + dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n", + vf->vf_id, err); + return err; + } + + vf->num_mac = 1; + + err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk); + if (err) { + dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n", + vf->vf_id); + return err; + } + + return 0; +} + +/** * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access * @vf: VF to remove access to VSI for */ @@ -1139,6 +1282,24 @@ void ice_vf_invalidate_vsi(struct ice_vf *vf) } /** + * ice_vf_vsi_release - Release the VF VSI and invalidate indexes + * @vf: pointer to the VF structure + * + * Release the VF associated with this VSI and then invalidate the VSI + * indexes. 
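ice_vf_ctrl_vsi_setup() above shows the new parameter-struct calling convention for ice_vsi_setup(): zero-initialize, set only the relevant fields, pass by pointer. A miniature of the pattern with hypothetical fields, not the driver's actual ice_vsi_cfg_params layout:

#include <stdio.h>

struct vsi_cfg_params {
        int type;
        int flags;
        void *pi;   /* port info, unused in this sketch */
        void *vf;   /* owning VF, unused in this sketch */
};

static void vsi_setup(const struct vsi_cfg_params *p)
{
        printf("type=%d flags=%d\n", p->type, p->flags);
}

int main(void)
{
        struct vsi_cfg_params params = { .type = 1, .flags = 2 };

        vsi_setup(&params); /* fields left unset stay 0/NULL by definition */
        return 0;
}

New options become opt-in fields instead of another positional argument at every call site.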
+ */ +void ice_vf_vsi_release(struct ice_vf *vf) +{ + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + + if (WARN_ON(!vsi)) + return; + + ice_vsi_release(vsi); + ice_vf_invalidate_vsi(vf); +} + +/** * ice_vf_set_initialized - VF is ready for VIRTCHNL communication * @vf: VF to set in initialized state * diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index 52bd9a3816bf..ef30f05b5d02 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -56,11 +56,13 @@ struct ice_mdd_vf_events { struct ice_vf_ops { enum ice_disq_rst_src reset_type; void (*free)(struct ice_vf *vf); + void (*clear_reset_state)(struct ice_vf *vf); void (*clear_mbx_register)(struct ice_vf *vf); void (*trigger_reset_register)(struct ice_vf *vf, bool is_vflr); bool (*poll_reset_status)(struct ice_vf *vf); void (*clear_reset_trigger)(struct ice_vf *vf); - int (*vsi_rebuild)(struct ice_vf *vf); + void (*irq_close)(struct ice_vf *vf); + int (*create_vsi)(struct ice_vf *vf); void (*post_vsi_rebuild)(struct ice_vf *vf); }; @@ -96,8 +98,8 @@ struct ice_vf { struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */ struct virtchnl_version_info vf_ver; u32 driver_caps; /* reported by VF driver */ - struct virtchnl_ether_addr dev_lan_addr; - struct virtchnl_ether_addr hw_lan_addr; + u8 dev_lan_addr[ETH_ALEN]; + u8 hw_lan_addr[ETH_ALEN]; struct ice_time_mac legacy_last_added_umac; DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF); DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF); @@ -213,7 +215,7 @@ u16 ice_get_num_vfs(struct ice_pf *pf); struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf); bool ice_is_vf_disabled(struct ice_vf *vf); int ice_check_vf_ready_for_cfg(struct ice_vf *vf); -void ice_set_vf_state_qs_dis(struct ice_vf *vf); +void ice_set_vf_state_dis(struct ice_vf *vf); bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf); void ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi, @@ -259,7 +261,7 @@ static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf) return -EOPNOTSUPP; } -static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) +static inline void ice_set_vf_state_dis(struct ice_vf *vf) { } diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h index 9c8ef2b01f0f..6f3293b793b5 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h @@ -23,6 +23,7 @@ #warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files" #endif +void ice_initialize_vf_entry(struct ice_vf *vf); void ice_dis_vf_qs(struct ice_vf *vf); int ice_check_vf_init(struct ice_vf *vf); enum virtchnl_status_code ice_err_to_virt_err(int err); @@ -35,7 +36,9 @@ void ice_vf_rebuild_host_cfg(struct ice_vf *vf); void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf); void ice_vf_ctrl_vsi_release(struct ice_vf *vf); struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf); +int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi); void ice_vf_invalidate_vsi(struct ice_vf *vf); +void ice_vf_vsi_release(struct ice_vf *vf); void ice_vf_set_initialized(struct ice_vf *vf); #endif /* _ICE_VF_LIB_PRIVATE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c index d4a4001b6e5d..f56fa94ff3d0 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c @@ -39,7 +39,7 @@ 
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd); } -static const u32 ice_legacy_aq_to_vc_speed[15] = { +static const u32 ice_legacy_aq_to_vc_speed[] = { VIRTCHNL_LINK_SPEED_100MB, /* BIT(0) */ VIRTCHNL_LINK_SPEED_100MB, VIRTCHNL_LINK_SPEED_1GB, @@ -51,10 +51,6 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = { VIRTCHNL_LINK_SPEED_40GB, VIRTCHNL_LINK_SPEED_40GB, VIRTCHNL_LINK_SPEED_40GB, - VIRTCHNL_LINK_SPEED_UNKNOWN, - VIRTCHNL_LINK_SPEED_UNKNOWN, - VIRTCHNL_LINK_SPEED_UNKNOWN, - VIRTCHNL_LINK_SPEED_UNKNOWN /* BIT(14) */ }; /** @@ -71,21 +67,20 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = { */ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) { - u32 speed; + /* convert a BIT() value into an array index */ + u32 index = fls(link_speed) - 1; - if (adv_link_support) { - /* convert a BIT() value into an array index */ - speed = ice_get_link_speed(fls(link_speed) - 1); - } else { + if (adv_link_support) + return ice_get_link_speed(index); + else if (index < ARRAY_SIZE(ice_legacy_aq_to_vc_speed)) /* Virtchnl speeds are not defined for every speed supported in * the hardware. To maintain compatibility with older AVF * drivers, while reporting the speed the new speed values are * resolved to the closest known virtchnl speeds */ - speed = ice_legacy_aq_to_vc_speed[fls(link_speed) - 1]; - } + return ice_legacy_aq_to_vc_speed[index]; - return speed; + return VIRTCHNL_LINK_SPEED_UNKNOWN; } /* The mailbox overflow detection algorithm helps to check if there diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c index 5ecc0ee9a78e..b1ffb81893d4 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c @@ -44,13 +44,17 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) /* outer VLAN ops regardless of port VLAN config */ vlan_ops->add_vlan = ice_vsi_add_vlan; - vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering; vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering; vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering; if (ice_vf_is_port_vlan_ena(vf)) { /* setup outer VLAN ops */ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan; + /* all Rx traffic should be in the domain of the + * assigned port VLAN, so prevent disabling Rx VLAN + * filtering + */ + vlan_ops->dis_rx_filtering = noop_vlan; vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering; @@ -63,6 +67,9 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion; vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion; } else { + vlan_ops->dis_rx_filtering = + ice_vsi_dis_rx_vlan_filtering; + if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags)) vlan_ops->ena_rx_filtering = noop_vlan; else @@ -96,7 +103,14 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan; vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering; + /* all Rx traffic should be in the domain of the + * assigned port VLAN, so prevent disabling Rx VLAN + * filtering + */ + vlan_ops->dis_rx_filtering = noop_vlan; } else { + vlan_ops->dis_rx_filtering = + ice_vsi_dis_rx_vlan_filtering; if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags)) vlan_ops->ena_rx_filtering = noop_vlan; else diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 
dab3cd5d300e..e24e3f5017ca 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -507,7 +507,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; vfres->vsi_res[0].num_queue_pairs = vsi->num_txq; ether_addr_copy(vfres->vsi_res[0].default_mac_addr, - vf->hw_lan_addr.addr); + vf->hw_lan_addr); /* match guest capabilities */ vf->driver_caps = vfres->vf_cap_flags; @@ -1802,10 +1802,10 @@ ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr) * was correctly specified over VIRTCHNL */ if ((ice_is_vc_addr_legacy(vc_ether_addr) && - is_zero_ether_addr(vf->hw_lan_addr.addr)) || + is_zero_ether_addr(vf->hw_lan_addr)) || ice_is_vc_addr_primary(vc_ether_addr)) { - ether_addr_copy(vf->dev_lan_addr.addr, mac_addr); - ether_addr_copy(vf->hw_lan_addr.addr, mac_addr); + ether_addr_copy(vf->dev_lan_addr, mac_addr); + ether_addr_copy(vf->hw_lan_addr, mac_addr); } /* hardware and device MACs are already set, but its possible that the @@ -1836,7 +1836,7 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, int ret; /* device MAC already added */ - if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr)) + if (ether_addr_equal(mac_addr, vf->dev_lan_addr)) return 0; if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) { @@ -1891,8 +1891,8 @@ ice_update_legacy_cached_mac(struct ice_vf *vf, ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) return; - ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr); - ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr); + ether_addr_copy(vf->dev_lan_addr, vf->legacy_last_added_umac.addr); + ether_addr_copy(vf->hw_lan_addr, vf->legacy_last_added_umac.addr); } /** @@ -1906,15 +1906,15 @@ ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr) u8 *mac_addr = vc_ether_addr->addr; if (!is_valid_ether_addr(mac_addr) || - !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr)) + !ether_addr_equal(vf->dev_lan_addr, mac_addr)) return; /* allow the device MAC to be repopulated in the add flow and don't - * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant + * clear the hardware MAC (i.e. 
hw_lan_addr) here as that is meant * to be persistent on VM reboot and across driver unload/load, which * won't work if we clear the hardware MAC here */ - eth_zero_addr(vf->dev_lan_addr.addr); + eth_zero_addr(vf->dev_lan_addr); ice_update_legacy_cached_mac(vf, vc_ether_addr); } @@ -1934,7 +1934,7 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, int status; if (!ice_can_vf_change_mac(vf) && - ether_addr_equal(vf->dev_lan_addr.addr, mac_addr)) + ether_addr_equal(vf->dev_lan_addr, mac_addr)) return 0; status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI); @@ -3733,7 +3733,7 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) int result; if (!is_unicast_ether_addr(mac_addr) || - ether_addr_equal(mac_addr, vf->hw_lan_addr.addr)) + ether_addr_equal(mac_addr, vf->hw_lan_addr)) continue; if (vf->pf_set_mac) { diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index c6a58343d81d..e6ef6b303222 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -113,7 +113,7 @@ ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id) if (!ice_vc_isvalid_vsi_id(vf, vsi_id)) return -EINVAL; - if (!pf->vsi[vf->lan_vsi_idx]) + if (!ice_get_vf_vsi(vf)) return -EINVAL; return 0; @@ -494,7 +494,7 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun) vf_prof = fdir->fdir_prof[flow]; - vf_vsi = pf->vsi[vf->lan_vsi_idx]; + vf_vsi = ice_get_vf_vsi(vf); if (!vf_vsi) { dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id); return; @@ -572,7 +572,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, pf = vf->pf; dev = ice_pf_to_dev(pf); hw = &pf->hw; - vf_vsi = pf->vsi[vf->lan_vsi_idx]; + vf_vsi = ice_get_vf_vsi(vf); if (!vf_vsi) return -EINVAL; @@ -1205,7 +1205,7 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf, pf = vf->pf; dev = ice_pf_to_dev(pf); hw = &pf->hw; - vsi = pf->vsi[vf->lan_vsi_idx]; + vsi = ice_get_vf_vsi(vf); if (!vsi) { dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id); return -EINVAL; diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 907055b77af0..31565bbafa22 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -598,6 +598,112 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) } /** + * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ + * @xdp_ring: XDP Tx ring + */ +static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring) +{ + u16 ntc = xdp_ring->next_to_clean; + struct ice_tx_desc *tx_desc; + u16 cnt = xdp_ring->count; + struct ice_tx_buf *tx_buf; + u16 completed_frames = 0; + u16 xsk_frames = 0; + u16 last_rs; + int i; + + last_rs = xdp_ring->next_to_use ? 
xdp_ring->next_to_use - 1 : cnt - 1; + tx_desc = ICE_TX_DESC(xdp_ring, last_rs); + if (tx_desc->cmd_type_offset_bsz & + cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) { + if (last_rs >= ntc) + completed_frames = last_rs - ntc + 1; + else + completed_frames = last_rs + cnt - ntc + 1; + } + + if (!completed_frames) + return; + + if (likely(!xdp_ring->xdp_tx_active)) { + xsk_frames = completed_frames; + goto skip; + } + + ntc = xdp_ring->next_to_clean; + for (i = 0; i < completed_frames; i++) { + tx_buf = &xdp_ring->tx_buf[ntc]; + + if (tx_buf->type == ICE_TX_BUF_XSK_TX) { + tx_buf->type = ICE_TX_BUF_EMPTY; + xsk_buff_free(tx_buf->xdp); + xdp_ring->xdp_tx_active--; + } else { + xsk_frames++; + } + + ntc++; + if (ntc >= xdp_ring->count) + ntc = 0; + } +skip: + tx_desc->cmd_type_offset_bsz = 0; + xdp_ring->next_to_clean += completed_frames; + if (xdp_ring->next_to_clean >= cnt) + xdp_ring->next_to_clean -= cnt; + if (xsk_frames) + xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames); +} + +/** + * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX + * @xdp: XDP buffer to xmit + * @xdp_ring: XDP ring to produce descriptor onto + * + * note that this function works directly on xdp_buff, no need to convert + * it to xdp_frame. xdp_buff pointer is stored to ice_tx_buf so that cleaning + * side will be able to xsk_buff_free() it. + * + * Returns ICE_XDP_TX for successfully produced desc, ICE_XDP_CONSUMED if there + * was not enough space on XDP ring + */ +static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp, + struct ice_tx_ring *xdp_ring) +{ + u32 size = xdp->data_end - xdp->data; + u32 ntu = xdp_ring->next_to_use; + struct ice_tx_desc *tx_desc; + struct ice_tx_buf *tx_buf; + dma_addr_t dma; + + if (ICE_DESC_UNUSED(xdp_ring) < ICE_RING_QUARTER(xdp_ring)) { + ice_clean_xdp_irq_zc(xdp_ring); + if (!ICE_DESC_UNUSED(xdp_ring)) { + xdp_ring->ring_stats->tx_stats.tx_busy++; + return ICE_XDP_CONSUMED; + } + } + + dma = xsk_buff_xdp_get_dma(xdp); + xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size); + + tx_buf = &xdp_ring->tx_buf[ntu]; + tx_buf->xdp = xdp; + tx_buf->type = ICE_TX_BUF_XSK_TX; + tx_desc = ICE_TX_DESC(xdp_ring, ntu); + tx_desc->buf_addr = cpu_to_le64(dma); + tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, + 0, size, 0); + xdp_ring->xdp_tx_active++; + + if (++ntu == xdp_ring->count) + ntu = 0; + xdp_ring->next_to_use = ntu; + + return ICE_XDP_TX; +} + +/** * ice_run_xdp_zc - Executes an XDP program in zero-copy path * @rx_ring: Rx ring * @xdp: xdp_buff used as input to the XDP program @@ -630,7 +736,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, case XDP_PASS: break; case XDP_TX: - result = ice_xmit_xdp_buff(xdp, xdp_ring); + result = ice_xmit_xdp_tx_zc(xdp, xdp_ring); if (result == ICE_XDP_CONSUMED) goto out_failure; break; @@ -760,7 +866,7 @@ construct_skb: if (entries_to_alloc > ICE_RING_QUARTER(rx_ring)) failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc); - ice_finalize_xdp_rx(xdp_ring, xdp_xmit); + ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0); ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { @@ -776,75 +882,6 @@ construct_skb: } /** - * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer - * @xdp_ring: XDP Tx ring - * @tx_buf: Tx buffer to clean - */ -static void -ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf) -{ - xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf); - xdp_ring->xdp_tx_active--; - dma_unmap_single(xdp_ring->dev, 
dma_unmap_addr(tx_buf, dma), - dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); - dma_unmap_len_set(tx_buf, len, 0); -} - -/** - * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ - * @xdp_ring: XDP Tx ring - */ -static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring) -{ - u16 ntc = xdp_ring->next_to_clean; - struct ice_tx_desc *tx_desc; - u16 cnt = xdp_ring->count; - struct ice_tx_buf *tx_buf; - u16 xsk_frames = 0; - u16 last_rs; - int i; - - last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1; - tx_desc = ICE_TX_DESC(xdp_ring, last_rs); - if ((tx_desc->cmd_type_offset_bsz & - cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) { - if (last_rs >= ntc) - xsk_frames = last_rs - ntc + 1; - else - xsk_frames = last_rs + cnt - ntc + 1; - } - - if (!xsk_frames) - return; - - if (likely(!xdp_ring->xdp_tx_active)) - goto skip; - - ntc = xdp_ring->next_to_clean; - for (i = 0; i < xsk_frames; i++) { - tx_buf = &xdp_ring->tx_buf[ntc]; - - if (tx_buf->raw_buf) { - ice_clean_xdp_tx_buf(xdp_ring, tx_buf); - tx_buf->raw_buf = NULL; - } else { - xsk_frames++; - } - - ntc++; - if (ntc >= xdp_ring->count) - ntc = 0; - } -skip: - tx_desc->cmd_type_offset_bsz = 0; - xdp_ring->next_to_clean += xsk_frames; - if (xdp_ring->next_to_clean >= cnt) - xdp_ring->next_to_clean -= cnt; - if (xsk_frames) - xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames); -} - -/** * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor * @xdp_ring: XDP ring to produce the HW Tx descriptor on * @desc: AF_XDP descriptor to pull the DMA address and length from @@ -918,20 +955,6 @@ static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *d } /** - * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU) - * @xdp_ring: XDP ring to produce the HW Tx descriptors on - */ -static void ice_set_rs_bit(struct ice_tx_ring *xdp_ring) -{ - u16 ntu = xdp_ring->next_to_use ? 
xdp_ring->next_to_use - 1 : xdp_ring->count - 1; - struct ice_tx_desc *tx_desc; - - tx_desc = ICE_TX_DESC(xdp_ring, ntu); - tx_desc->cmd_type_offset_bsz |= - cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S); -} - -/** * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring * @xdp_ring: XDP ring to produce the HW Tx descriptors on * @@ -1065,12 +1088,12 @@ void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) while (ntc != ntu) { struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc]; - if (tx_buf->raw_buf) - ice_clean_xdp_tx_buf(xdp_ring, tx_buf); - else + if (tx_buf->type == ICE_TX_BUF_XSK_TX) { + tx_buf->type = ICE_TX_BUF_EMPTY; + xsk_buff_free(tx_buf->xdp); + } else { xsk_frames++; - - tx_buf->raw_buf = NULL; + } ntc++; if (ntc >= xdp_ring->count) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 3c0c35ecea10..03bc1e8af575 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2256,6 +2256,30 @@ static void igb_enable_mas(struct igb_adapter *adapter) } } +#ifdef CONFIG_IGB_HWMON +/** + * igb_set_i2c_bb - Init I2C interface + * @hw: pointer to hardware structure + **/ +static void igb_set_i2c_bb(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 i2cctl; + + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_I2C_ENA; + wr32(E1000_CTRL_EXT, ctrl_ext); + wrfl(); + + i2cctl = rd32(E1000_I2CPARAMS); + i2cctl |= E1000_I2CBB_EN + | E1000_I2C_CLK_OE_N + | E1000_I2C_DATA_OE_N; + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); +} +#endif + void igb_reset(struct igb_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; @@ -2400,7 +2424,8 @@ void igb_reset(struct igb_adapter *adapter) * interface. */ if (adapter->ets) - mac->ops.init_thermal_sensor_thresh(hw); + igb_set_i2c_bb(hw); + mac->ops.init_thermal_sensor_thresh(hw); } } #endif @@ -2810,6 +2835,22 @@ static int igb_offload_txtime(struct igb_adapter *adapter, return 0; } +static int igb_tc_query_caps(struct igb_adapter *adapter, + struct tc_query_caps_base *base) +{ + switch (base->type) { + case TC_SETUP_QDISC_TAPRIO: { + struct tc_taprio_caps *caps = base->caps; + + caps->broken_mqprio = true; + + return 0; + } + default: + return -EOPNOTSUPP; + } +} + static LIST_HEAD(igb_block_cb_list); static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, @@ -2818,6 +2859,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, struct igb_adapter *adapter = netdev_priv(dev); switch (type) { + case TC_QUERY_CAPS: + return igb_tc_query_caps(adapter, type_data); case TC_SETUP_QDISC_CBS: return igb_offload_cbs(adapter, type_data); case TC_SETUP_BLOCK: @@ -2871,8 +2914,14 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf) bpf_prog_put(old_prog); /* bpf is just replaced, RXQ and MTU are already setup */ - if (!need_reset) + if (!need_reset) { return 0; + } else { + if (prog) + xdp_features_set_redirect_target(dev, true); + else + xdp_features_clear_redirect_target(dev); + } if (running) igb_open(dev); @@ -3117,21 +3166,12 @@ static void igb_init_mas(struct igb_adapter *adapter) **/ static s32 igb_init_i2c(struct igb_adapter *adapter) { - struct e1000_hw *hw = &adapter->hw; s32 status = 0; - s32 i2cctl; /* I2C interface supported on i350 devices */ if (adapter->hw.mac.type != e1000_i350) return 0; - i2cctl = rd32(E1000_I2CPARAMS); - i2cctl |= E1000_I2CBB_EN - | E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N - | E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N; - wr32(E1000_I2CPARAMS, 
i2cctl); - wrfl(); - /* Initialize the i2c bus which is controlled by the registers. * This bus will use the i2c_algo_bit structure that implements * the protocol through toggling of the 4 bits in the register. @@ -3194,8 +3234,6 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_pci_reg; - pci_enable_pcie_error_reporting(pdev); - pci_set_master(pdev); pci_save_state(pdev); @@ -3317,6 +3355,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->priv_flags |= IFF_SUPP_NOFCS; netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT; /* MTU range: 68 - 9216 */ netdev->min_mtu = ETH_MIN_MTU; @@ -3521,6 +3560,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->ets = true; else adapter->ets = false; + /* Only enable I2C bit banging if an external thermal + * sensor is supported. + */ + if (adapter->ets) + igb_set_i2c_bb(hw); + hw->mac.ops.init_thermal_sensor_thresh(hw); if (igb_sysfs_init(adapter)) dev_err(&pdev->dev, "failed to allocate sysfs resources\n"); @@ -3626,7 +3671,6 @@ err_sw_init: err_ioremap: free_netdev(netdev); err_alloc_etherdev: - pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: @@ -3837,8 +3881,6 @@ static void igb_remove(struct pci_dev *pdev) kfree(adapter->shadow_vfta); free_netdev(netdev); - pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); } @@ -6794,7 +6836,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt) struct timespec64 ts; u32 tsauxc; - if (pin < 0 || pin >= IGB_N_PEROUT) + if (pin < 0 || pin >= IGB_N_SDP) return; spin_lock(&adapter->tmreg_lock); @@ -6802,7 +6844,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt) if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i354 || hw->mac.type == e1000_i350) { - s64 ns = timespec64_to_ns(&adapter->perout[pin].period); + s64 ns = timespec64_to_ns(&adapter->perout[tsintr_tt].period); u32 systiml, systimh, level_mask, level, rem; u64 systim, now; @@ -6850,8 +6892,8 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt) ts.tv_nsec = (u32)systim; ts.tv_sec = ((u32)(systim >> 32)) & 0xFF; } else { - ts = timespec64_add(adapter->perout[pin].start, - adapter->perout[pin].period); + ts = timespec64_add(adapter->perout[tsintr_tt].start, + adapter->perout[tsintr_tt].period); } /* u32 conversion of tv_sec is safe until y2106 */ @@ -6860,7 +6902,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt) tsauxc = rd32(E1000_TSAUXC); tsauxc |= TSAUXC_EN_TT0; wr32(E1000_TSAUXC, tsauxc); - adapter->perout[pin].start = ts; + adapter->perout[tsintr_tt].start = ts; spin_unlock(&adapter->tmreg_lock); } @@ -6874,7 +6916,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt) struct ptp_clock_event event; struct timespec64 ts; - if (pin < 0 || pin >= IGB_N_EXTTS) + if (pin < 0 || pin >= IGB_N_SDP) return; if (hw->mac.type == e1000_82580 || diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index a15927e77272..a1d815af507d 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -396,6 +396,35 @@ void igc_rx_fifo_flush_base(struct igc_hw *hw) rd32(IGC_MPC); } +bool igc_is_device_id_i225(struct igc_hw *hw) +{ + switch (hw->device_id) { + case IGC_DEV_ID_I225_LM: + case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case 
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index a15927e77272..a1d815af507d 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -396,6 +396,35 @@ void igc_rx_fifo_flush_base(struct igc_hw *hw)
 	rd32(IGC_MPC);
 }
 
+bool igc_is_device_id_i225(struct igc_hw *hw)
+{
+	switch (hw->device_id) {
+	case IGC_DEV_ID_I225_LM:
+	case IGC_DEV_ID_I225_V:
+	case IGC_DEV_ID_I225_I:
+	case IGC_DEV_ID_I225_K:
+	case IGC_DEV_ID_I225_K2:
+	case IGC_DEV_ID_I225_LMVP:
+	case IGC_DEV_ID_I225_IT:
+		return true;
+	default:
+		return false;
+	}
+}
+
+bool igc_is_device_id_i226(struct igc_hw *hw)
+{
+	switch (hw->device_id) {
+	case IGC_DEV_ID_I226_LM:
+	case IGC_DEV_ID_I226_V:
+	case IGC_DEV_ID_I226_K:
+	case IGC_DEV_ID_I226_IT:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static struct igc_mac_operations igc_mac_ops_base = {
 	.init_hw		= igc_init_hw_base,
 	.check_for_link		= igc_check_for_copper_link,
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index ce530f5fd7bd..7a992befca24 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -7,6 +7,8 @@
 /* forward declaration */
 void igc_rx_fifo_flush_base(struct igc_hw *hw);
 void igc_power_down_phy_copper_base(struct igc_hw *hw);
+bool igc_is_device_id_i225(struct igc_hw *hw);
+bool igc_is_device_id_i226(struct igc_hw *hw);
 
 /* Transmit Descriptor - Advanced */
 union igc_adv_tx_desc {
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index a7b22639cfcd..9dec3563ce3a 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -475,7 +475,9 @@
 #define IGC_TSAUXC_EN_TT0	BIT(0)  /* Enable target time 0. */
 #define IGC_TSAUXC_EN_TT1	BIT(1)  /* Enable target time 1. */
 #define IGC_TSAUXC_EN_CLK0	BIT(2)  /* Enable Configurable Frequency Clock 0. */
+#define IGC_TSAUXC_ST0		BIT(4)  /* Start Clock 0 Toggle on Target Time 0. */
 #define IGC_TSAUXC_EN_CLK1	BIT(5)  /* Enable Configurable Frequency Clock 1. */
+#define IGC_TSAUXC_ST1		BIT(7)  /* Start Clock 1 Toggle on Target Time 1. */
 #define IGC_TSAUXC_EN_TS0	BIT(8)  /* Enable hardware timestamp 0. */
 #define IGC_TSAUXC_AUTT0	BIT(9)  /* Auxiliary Timestamp Taken. */
 #define IGC_TSAUXC_EN_TS1	BIT(10) /* Enable hardware timestamp 1. */
@@ -522,6 +524,7 @@
 /* Transmit Scheduling */
 #define IGC_TQAVCTRL_TRANSMIT_MODE_TSN	0x00000001
 #define IGC_TQAVCTRL_ENHANCED_QAV	0x00000008
+#define IGC_TQAVCTRL_FUTSCDDIS		0x00000080
 
 #define IGC_TXQCTL_QUEUE_MODE_LAUNCHT	0x00000001
 #define IGC_TXQCTL_STRICT_CYCLE		0x00000002
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 44b1740dc098..2928a6c73692 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2942,7 +2942,9 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
 		if (tx_buffer->next_to_watch &&
 		    time_after(jiffies, tx_buffer->time_stamp +
 		    (adapter->tx_timeout_factor * HZ)) &&
-		    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
+		    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
+		    (rd32(IGC_TDH(tx_ring->reg_idx)) !=
+		     readl(tx_ring->tail))) {
 			/* detected Tx unit hang */
 			netdev_err(tx_ring->netdev,
 				   "Detected Tx Unit Hang\n"
@@ -5069,6 +5071,24 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
 }
 
 /**
+ * igc_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ * @txqueue: queue number that timed out
+ **/
+static void igc_tx_timeout(struct net_device *netdev,
+			   unsigned int __always_unused txqueue)
+{
+	struct igc_adapter *adapter = netdev_priv(netdev);
+	struct igc_hw *hw = &adapter->hw;
+
+	/* Do the reset outside of interrupt context */
+	adapter->tx_timeout_count++;
+	schedule_work(&adapter->reset_task);
+	wr32(IGC_EICS,
+	     (adapter->eims_enable_mask & ~adapter->eims_other));
+}
+
+/**
  * igc_get_stats64 - Get System Network Statistics
  * @netdev: network interface device structure
  * @stats: rtnl_link_stats64 pointer
@@ -5495,7 +5515,7 @@ static void igc_watchdog_task(struct work_struct *work)
 			case SPEED_100:
 			case SPEED_1000:
 			case SPEED_2500:
-				adapter->tx_timeout_factor = 7;
+				adapter->tx_timeout_factor = 1;
 				break;
 			}
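The igc_clean_tx_irq() hunk above tightens the hang check so that a queue is only reported hung when hardware has really stalled. Restated as a standalone predicate (an illustrative helper under the patch's assumptions, not driver code):

	/* A queue is considered hung only if the oldest pending descriptor
	 * has aged past tx_timeout_factor * HZ, the MAC is not paused by
	 * flow control (TXOFF), and the hardware head pointer still trails
	 * the tail we last wrote, i.e. queued work is not being consumed.
	 */
	static bool example_tx_hang(unsigned long stamp, unsigned long factor,
				    bool txoff, u32 head, u32 tail)
	{
		return time_after(jiffies, stamp + factor * HZ) &&
		       !txoff && head != tail;
	}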
@@ -5958,6 +5978,7 @@ static bool validate_schedule(struct igc_adapter *adapter,
 			      const struct tc_taprio_qopt_offload *qopt)
 {
 	int queue_uses[IGC_MAX_TX_QUEUES] = { };
+	struct igc_hw *hw = &adapter->hw;
 	struct timespec64 now;
 	size_t n;
 
@@ -5970,8 +5991,10 @@ static bool validate_schedule(struct igc_adapter *adapter,
 	 * in the future, it will hold all the packets until that
 	 * time, causing a lot of TX Hangs, so to avoid that, we
 	 * reject schedules that would start in the future.
+	 * Note: This limitation no longer applies to i226.
 	 */
-	if (!is_base_time_past(qopt->base_time, &now))
+	if (!is_base_time_past(qopt->base_time, &now) &&
+	    igc_is_device_id_i225(hw))
 		return false;
 
 	for (n = 0; n < qopt->num_entries; n++) {
@@ -6041,6 +6064,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 				 struct tc_taprio_qopt_offload *qopt)
 {
 	bool queue_configured[IGC_MAX_TX_QUEUES] = { };
+	struct igc_hw *hw = &adapter->hw;
 	u32 start_time = 0, end_time = 0;
 	size_t n;
 	int i;
@@ -6053,7 +6077,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 	if (qopt->base_time < 0)
 		return -ERANGE;
 
-	if (adapter->base_time)
+	if (igc_is_device_id_i225(hw) && adapter->base_time)
 		return -EALREADY;
 
 	if (!validate_schedule(adapter, qopt))
@@ -6201,12 +6225,35 @@ static int igc_tsn_enable_cbs(struct igc_adapter *adapter,
 	return igc_tsn_offload_apply(adapter);
 }
 
+static int igc_tc_query_caps(struct igc_adapter *adapter,
+			     struct tc_query_caps_base *base)
+{
+	struct igc_hw *hw = &adapter->hw;
+
+	switch (base->type) {
+	case TC_SETUP_QDISC_TAPRIO: {
+		struct tc_taprio_caps *caps = base->caps;
+
+		caps->broken_mqprio = true;
+
+		if (hw->mac.type == igc_i225)
+			caps->gate_mask_per_txq = true;
+
+		return 0;
+	}
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
 static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
 			void *type_data)
 {
 	struct igc_adapter *adapter = netdev_priv(dev);
 
 	switch (type) {
+	case TC_QUERY_CAPS:
+		return igc_tc_query_caps(adapter, type_data);
 	case TC_SETUP_QDISC_TAPRIO:
 		return igc_tsn_enable_qbv_scheduling(adapter, type_data);
 
@@ -6320,6 +6367,7 @@ static const struct net_device_ops igc_netdev_ops = {
 	.ndo_set_rx_mode	= igc_set_rx_mode,
 	.ndo_set_mac_address	= igc_set_mac,
 	.ndo_change_mtu		= igc_change_mtu,
+	.ndo_tx_timeout		= igc_tx_timeout,
 	.ndo_get_stats64	= igc_get_stats64,
 	.ndo_fix_features	= igc_fix_features,
 	.ndo_set_features	= igc_set_features,
@@ -6430,8 +6478,6 @@ static int igc_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_pci_reg;
 
-	pci_enable_pcie_error_reporting(pdev);
-
 	err = pci_enable_ptm(pdev, NULL);
 	if (err < 0)
 		dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n");
@@ -6529,6 +6575,9 @@ static int igc_probe(struct pci_dev *pdev,
 	netdev->mpls_features |= NETIF_F_HW_CSUM;
 	netdev->hw_enc_features |= netdev->vlan_features;
 
+	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+			       NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
 	/* MTU range: 68 - 9216 */
 	netdev->min_mtu = ETH_MIN_MTU;
 	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
@@ -6636,7 +6685,6 @@ err_sw_init:
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_etherdev:
-	pci_disable_pcie_error_reporting(pdev);
 	pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
@@ -6684,8 +6732,6 @@ static void igc_remove(struct pci_dev *pdev)
 
 	free_netdev(netdev);
 
-	pci_disable_pcie_error_reporting(pdev);
-
 	pci_disable_device(pdev);
 }
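Several drivers in this series begin advertising XDP capabilities through netdev->xdp_features, as igc does in the probe hunk above. A rough sketch of what a caller can infer from those flags (the helper is illustrative; only the flag names come from the patch set):

	static bool example_can_redirect_into(const struct net_device *dev)
	{
		/* NETDEV_XDP_ACT_BASIC: an XDP program can run on Rx.
		 * NETDEV_XDP_ACT_REDIRECT: the Rx path honours XDP_REDIRECT.
		 * NETDEV_XDP_ACT_XSK_ZEROCOPY: AF_XDP zero-copy is supported.
		 * Being a redirect *target* (NETDEV_XDP_ACT_NDO_XMIT) is
		 * toggled at runtime through
		 * xdp_features_{set,clear}_redirect_target(), as the
		 * igb/igc/ixgbe hunks in this series do.
		 */
		return dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT;
	}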
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 8dbb9f903ca7..4e10ced736db 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -322,7 +322,7 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
 		ts = ns_to_timespec64(ns);
 		if (rq->perout.index == 1) {
 			if (use_freq) {
-				tsauxc_mask = IGC_TSAUXC_EN_CLK1;
+				tsauxc_mask = IGC_TSAUXC_EN_CLK1 | IGC_TSAUXC_ST1;
 				tsim_mask = 0;
 			} else {
 				tsauxc_mask = IGC_TSAUXC_EN_TT1;
@@ -333,7 +333,7 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
 			freqout = IGC_FREQOUT1;
 		} else {
 			if (use_freq) {
-				tsauxc_mask = IGC_TSAUXC_EN_CLK0;
+				tsauxc_mask = IGC_TSAUXC_EN_CLK0 | IGC_TSAUXC_ST0;
 				tsim_mask = 0;
 			} else {
 				tsauxc_mask = IGC_TSAUXC_EN_TT0;
@@ -347,10 +347,12 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
 		tsauxc = rd32(IGC_TSAUXC);
 		tsim = rd32(IGC_TSIM);
 		if (rq->perout.index == 1) {
-			tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1);
+			tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1 |
+				    IGC_TSAUXC_ST1);
 			tsim &= ~IGC_TSICR_TT1;
 		} else {
-			tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0);
+			tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0 |
+				    IGC_TSAUXC_ST0);
 			tsim &= ~IGC_TSICR_TT0;
 		}
 		if (on) {
@@ -415,10 +417,12 @@ static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
  *
  * We need to convert the system time value stored in the RX/TXSTMP registers
  * into a hwtstamp which can be used by the upper level timestamping functions.
+ *
+ * Returns 0 on success.
 **/
-static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
-				       struct skb_shared_hwtstamps *hwtstamps,
-				       u64 systim)
+static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
+				      struct skb_shared_hwtstamps *hwtstamps,
+				      u64 systim)
 {
 	switch (adapter->hw.mac.type) {
 	case igc_i225:
@@ -428,8 +432,9 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
 			systim & 0xFFFFFFFF);
 		break;
 	default:
-		break;
+		return -EINVAL;
 	}
+	return 0;
 }
 
 /**
@@ -650,7 +655,8 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
 	regval = rd32(IGC_TXSTMPL);
 	regval |= (u64)rd32(IGC_TXSTMPH) << 32;
 
-	igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+	if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval))
+		return;
 
 	switch (adapter->link_speed) {
 	case SPEED_10:
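The new IGC_TSAUXC_ST0/ST1 bits used above arm the configurable frequency clocks to start toggling when SYSTIM reaches the programmed target time instead of immediately. A sketch of the implied programming order for clock 0, assuming the register names used by this driver (locking and error handling omitted; not driver code):

	static void example_start_freq_clock0(struct igc_adapter *adapter,
					      u32 start_sec, u32 start_nsec,
					      u32 half_cycle_ns)
	{
		struct igc_hw *hw = &adapter->hw;
		u32 tsauxc;

		wr32(IGC_FREQOUT0, half_cycle_ns);	/* toggle interval */
		wr32(IGC_TRGTTIML0, start_nsec);	/* target time 0 = start */
		wr32(IGC_TRGTTIMH0, start_sec);

		tsauxc = rd32(IGC_TSAUXC);
		/* EN_CLK0 enables the clock; ST0 defers the first toggle
		 * until SYSTIM passes target time 0.
		 */
		tsauxc |= IGC_TSAUXC_EN_CLK0 | IGC_TSAUXC_ST0;
		wr32(IGC_TSAUXC, tsauxc);
	}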
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index bb10d7b65232..a386c8d61dbf 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -2,6 +2,7 @@
 /* Copyright (c) 2019 Intel Corporation */
 
 #include "igc.h"
+#include "igc_hw.h"
 #include "igc_tsn.h"
 
 static bool is_any_launchtime(struct igc_adapter *adapter)
@@ -92,7 +93,8 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
 
 	tqavctrl = rd32(IGC_TQAVCTRL);
 	tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
-		      IGC_TQAVCTRL_ENHANCED_QAV);
+		      IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS);
+
 	wr32(IGC_TQAVCTRL, tqavctrl);
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -117,20 +119,10 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
 	ktime_t base_time, systim;
 	int i;
 
-	cycle = adapter->cycle_time;
-	base_time = adapter->base_time;
-
 	wr32(IGC_TSAUXC, 0);
 	wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
 	wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
 
-	tqavctrl = rd32(IGC_TQAVCTRL);
-	tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
-	wr32(IGC_TQAVCTRL, tqavctrl);
-
-	wr32(IGC_QBVCYCLET_S, cycle);
-	wr32(IGC_QBVCYCLET, cycle);
-
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igc_ring *ring = adapter->tx_ring[i];
 		u32 txqctl = 0;
@@ -233,21 +225,46 @@ skip_cbs:
 		wr32(IGC_TXQCTL(i), txqctl);
 	}
 
+	tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS;
+	tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
+
+	cycle = adapter->cycle_time;
+	base_time = adapter->base_time;
+
 	nsec = rd32(IGC_SYSTIML);
 	sec = rd32(IGC_SYSTIMH);
 
 	systim = ktime_set(sec, nsec);
 	if (ktime_compare(systim, base_time) > 0) {
-		s64 n;
+		s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
 
-		n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
 		base_time = ktime_add_ns(base_time, (n + 1) * cycle);
+	} else {
+		/* According to datasheet section 7.5.2.9.3.3, FutScdDis bit
+		 * has to be configured before the cycle time and base time.
+		 * Tx won't hang if a GCL is already running,
+		 * so in this case we don't need to set FutScdDis.
+		 */
+		if (igc_is_device_id_i226(hw) &&
+		    !(rd32(IGC_BASET_H) || rd32(IGC_BASET_L)))
+			tqavctrl |= IGC_TQAVCTRL_FUTSCDDIS;
 	}
 
-	baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
+	wr32(IGC_TQAVCTRL, tqavctrl);
+
+	wr32(IGC_QBVCYCLET_S, cycle);
+	wr32(IGC_QBVCYCLET, cycle);
 
+	baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
 	wr32(IGC_BASET_H, baset_h);
+
+	/* In i226, Future base time is only supported when FutScdDis bit
+	 * is enabled and only active for re-configuration.
+	 * In this case, initialize the base time with zero to create
+	 * "re-configuration" scenario then only set the desired base time.
+	 */
+	if (tqavctrl & IGC_TQAVCTRL_FUTSCDDIS)
+		wr32(IGC_BASET_L, 0);
 	wr32(IGC_BASET_L, baset_l);
 
 	return 0;
@@ -274,17 +291,14 @@ int igc_tsn_reset(struct igc_adapter *adapter)
 
 int igc_tsn_offload_apply(struct igc_adapter *adapter)
 {
-	int err;
+	struct igc_hw *hw = &adapter->hw;
 
-	if (netif_running(adapter->netdev)) {
+	if (netif_running(adapter->netdev) && igc_is_device_id_i225(hw)) {
 		schedule_work(&adapter->reset_task);
 		return 0;
 	}
 
-	err = igc_tsn_enable_offload(adapter);
-	if (err < 0)
-		return err;
+	igc_tsn_reset(adapter);
 
-	adapter->flags = igc_tsn_new_flags(adapter);
 	return 0;
 }
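The igc_tsn.c hunk above encodes an ordering constraint from the i226 datasheet: FutScdDis must be set in TQAVCTRL before the cycle time and base time are written, and BASET_L is written twice (zero first) to force a re-configuration. When the requested base time is already in the past, it is instead advanced to the next cycle boundary; the arithmetic, pulled out as an illustrative helper that is not part of the patch:

	#include <linux/math64.h>

	/* Advance base_time to the first cycle boundary after now.
	 * Mirrors the div64_s64() computation in igc_tsn_enable_offload().
	 */
	static s64 example_advance_base_time(s64 base_time, s64 now, s64 cycle)
	{
		s64 n = div64_s64(now - base_time, cycle);

		return base_time + (n + 1) * cycle;
	}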
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index aeeb34e64610..e27af72aada8 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -29,6 +29,11 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
 	if (old_prog)
 		bpf_prog_put(old_prog);
 
+	if (prog)
+		xdp_features_set_redirect_target(dev, true);
+	else
+		xdp_features_clear_redirect_target(dev);
+
 	if (if_running)
 		igc_open(dev);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index bc68b8f2176d..8736ca4b2628 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -73,6 +73,8 @@
 #define IXGBE_RXBUFFER_4K	4096
 #define IXGBE_MAX_RXBUFFER	16384  /* largest size for a single descriptor */
 
+#define IXGBE_PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
 /* Attempt to maximize the headroom available for incoming frames.  We
  * use a 2K buffer for receives and need 1536/1534 to store the data for
  * the frame.  This leaves us with 512 bytes of room.  From that we need
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 38c4609bd429..878dd8dff528 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3292,13 +3292,14 @@ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 				 bool *link_up, bool link_up_wait_to_complete)
 {
+	bool crosstalk_fix_active = ixgbe_need_crosstalk_fix(hw);
 	u32 links_reg, links_orig;
 	u32 i;
 
 	/* If Crosstalk fix enabled do the sanity check of making sure
 	 * the SFP+ cage is full.
 	 */
-	if (ixgbe_need_crosstalk_fix(hw)) {
+	if (crosstalk_fix_active) {
 		u32 sfp_cage_full;
 
 		switch (hw->mac.type) {
@@ -3346,10 +3347,24 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
 		}
 	} else {
-		if (links_reg & IXGBE_LINKS_UP)
+		if (links_reg & IXGBE_LINKS_UP) {
+			if (crosstalk_fix_active) {
+				/* Check the link state again after a delay
+				 * to filter out spurious link up
+				 * notifications.
+				 */
+				mdelay(5);
+				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+				if (!(links_reg & IXGBE_LINKS_UP)) {
+					*link_up = false;
+					*speed = IXGBE_LINK_SPEED_UNKNOWN;
+					return 0;
+				}
+			}
 			*link_up = true;
-		else
+		} else {
 			*link_up = false;
+		}
 	}
 
 	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 53a969e34883..13a6fca31004 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -557,8 +557,10 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
 /**
  * ixgbe_ipsec_add_sa - program device with a security association
  * @xs: pointer to transformer state struct
+ * @extack: extack pointer to fill failure reason
 **/
-static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
+static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
+			      struct netlink_ext_ack *extack)
 {
 	struct net_device *dev = xs->xso.real_dev;
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -570,23 +572,22 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
 	int i;
 
 	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
-		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
-			   xs->id.proto);
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for ipsec offload");
 		return -EINVAL;
 	}
 
 	if (xs->props.mode != XFRM_MODE_TRANSPORT) {
-		netdev_err(dev, "Unsupported mode for ipsec offload\n");
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for ipsec offload");
 		return -EINVAL;
 	}
 
 	if (ixgbe_ipsec_check_mgmt_ip(xs)) {
-		netdev_err(dev, "IPsec IP addr clash with mgmt filters\n");
+		NL_SET_ERR_MSG_MOD(extack, "IPsec IP addr clash with mgmt filters");
 		return -EINVAL;
 	}
 
 	if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
-		netdev_err(dev, "Unsupported ipsec offload type\n");
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported ipsec offload type");
 		return -EINVAL;
 	}
 
@@ -594,14 +595,14 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
 		struct rx_sa rsa;
 
 		if (xs->calg) {
-			netdev_err(dev, "Compression offload not supported\n");
+			NL_SET_ERR_MSG_MOD(extack, "Compression offload not supported");
 			return -EINVAL;
 		}
 
 		/* find the first unused index */
 		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
 		if (ret < 0) {
-			netdev_err(dev, "No space for SA in Rx table!\n");
+			NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx table!");
 			return ret;
 		}
 		sa_idx = (u16)ret;
@@ -616,7 +617,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
 		/* get the key and salt */
 		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
 		if (ret) {
-			netdev_err(dev, "Failed to get key data for Rx SA table\n");
+			NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
 			return ret;
 		}
 
@@ -676,7 +677,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
 
 		} else {
 			/* no match and no empty slot */
-			netdev_err(dev, "No space for SA in Rx IP SA table\n");
+			NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx IP SA table");
 			memset(&rsa, 0, sizeof(rsa));
 			return -ENOSPC;
 		}
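The conversions above swap netdev_err() for NL_SET_ERR_MSG_MOD(), so the failure reason is returned with the netlink reply for the offload request (for example an 'ip xfrm state add ... offload dev ...' invocation) instead of landing only in the kernel log. The shape of the pattern, as an illustrative callback rather than driver code:

	static int example_xdo_dev_state_add(struct xfrm_state *xs,
					     struct netlink_ext_ack *extack)
	{
		if (xs->props.mode != XFRM_MODE_TRANSPORT) {
			NL_SET_ERR_MSG_MOD(extack, "Only transport mode is offloadable");
			return -EINVAL;
		}
		return 0;
	}

Note that ixgbe_ipsec_vf_add_sa() below passes a NULL extack, which NL_SET_ERR_MSG_MOD() tolerates, since VF requests arrive over the mailbox rather than netlink.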
@@ -711,7 +712,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
 		/* find the first unused index */
 		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
 		if (ret < 0) {
-			netdev_err(dev, "No space for SA in Tx table\n");
+			NL_SET_ERR_MSG_MOD(extack, "No space for SA in Tx table");
 			return ret;
 		}
 		sa_idx = (u16)ret;
@@ -725,7 +726,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
 
 		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
 		if (ret) {
-			netdev_err(dev, "Failed to get key data for Tx SA table\n");
+			NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
 			memset(&tsa, 0, sizeof(tsa));
 			return ret;
 		}
@@ -950,7 +951,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
 	memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name));
 
 	/* set up the HW offload */
-	err = ixgbe_ipsec_add_sa(xs);
+	err = ixgbe_ipsec_add_sa(xs, NULL);
 	if (err)
 		goto err_aead;
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ab8370c413f3..773c35fecace 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6647,7 +6647,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 			     rx_ring->queue_index,
 			     ixgbe_rx_napi_id(rx_ring)) < 0)
 		goto err;
 
-	rx_ring->xdp_prog = adapter->xdp_prog;
+	WRITE_ONCE(rx_ring->xdp_prog, adapter->xdp_prog);
 
 	return 0;
 err:
@@ -6778,6 +6778,18 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 }
 
 /**
+ * ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @adapter: device handle, pointer to adapter
+ */
+static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
+{
+	if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+		return IXGBE_RXBUFFER_2K;
+	else
+		return IXGBE_RXBUFFER_3K;
+}
+
+/**
  * ixgbe_change_mtu - Change the Maximum Transfer Unit
  * @netdev: network interface device structure
  * @new_mtu: new value for maximum frame size
@@ -6788,18 +6800,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	if (adapter->xdp_prog) {
-		int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
-				     VLAN_HLEN;
-		int i;
-
-		for (i = 0; i < adapter->num_rx_queues; i++) {
-			struct ixgbe_ring *ring = adapter->rx_ring[i];
+	if (ixgbe_enabled_xdp_adapter(adapter)) {
+		int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
 
-			if (new_frame_size > ixgbe_rx_bufsz(ring)) {
-				e_warn(probe, "Requested MTU size is not supported with XDP\n");
-				return -EINVAL;
-			}
+		if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) {
+			e_warn(probe, "Requested MTU size is not supported with XDP\n");
+			return -EINVAL;
 		}
 	}
 
@@ -8937,7 +8943,8 @@ ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
 	int regnum = addr;
 
 	if (devad != MDIO_DEVAD_NONE)
-		regnum |= (devad << 16) | MII_ADDR_C45;
+		return mdiobus_c45_read(adapter->mii_bus, prtad,
+					devad, regnum);
 
 	return mdiobus_read(adapter->mii_bus, prtad, regnum);
 }
@@ -8960,7 +8967,8 @@ static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
 	int regnum = addr;
 
 	if (devad != MDIO_DEVAD_NONE)
-		regnum |= (devad << 16) | MII_ADDR_C45;
+		return mdiobus_c45_write(adapter->mii_bus, prtad, devad,
+					 regnum, value);
 
 	return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
 }
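The two hunks above replace the packed clause 45 encoding (device address and MII_ADDR_C45 flag folded into regnum) with the dedicated C45 accessors, which carry devad as an explicit argument. Side by side, as an assumed example rather than driver code:

	static int example_read_c45_register(struct mii_bus *bus, int phy_addr)
	{
		/* old, removed encoding:
		 *   mdiobus_read(bus, phy_addr,
		 *		  MII_ADDR_C45 | (MDIO_MMD_VEND1 << 16) | 0x10);
		 */
		return mdiobus_c45_read(bus, phy_addr, MDIO_MMD_VEND1, 0x10);
	}

The register 0x10 in MDIO_MMD_VEND1 is an arbitrary placeholder; the point is only the calling convention.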
@@ -10297,14 +10305,15 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 		synchronize_rcu();
 		err = ixgbe_setup_tc(dev, adapter->hw_tcs);
 
-		if (err) {
-			rcu_assign_pointer(adapter->xdp_prog, old_prog);
+		if (err)
 			return -EINVAL;
-		}
+		if (!prog)
+			xdp_features_clear_redirect_target(dev);
 	} else {
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			(void)xchg(&adapter->rx_ring[i]->xdp_prog,
-			    adapter->xdp_prog);
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			WRITE_ONCE(adapter->rx_ring[i]->xdp_prog,
+				   adapter->xdp_prog);
+		}
 	}
 
 	if (old_prog)
@@ -10320,6 +10329,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 			if (adapter->xdp_ring[i]->xsk_pool)
 				(void)ixgbe_xsk_wakeup(adapter->netdev, i,
 						       XDP_WAKEUP_RX);
+		xdp_features_set_redirect_target(dev, true);
 	}
 
 	return 0;
@@ -10808,8 +10818,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_pci_reg;
 	}
 
-	pci_enable_pcie_error_reporting(pdev);
-
 	pci_set_master(pdev);
 	pci_save_state(pdev);
 
@@ -11017,6 +11025,9 @@ skip_sriov:
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->priv_flags |= IFF_SUPP_NOFCS;
 
+	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+			       NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
 	/* MTU range: 68 - 9710 */
 	netdev->min_mtu = ETH_MIN_MTU;
 	netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
@@ -11237,7 +11248,6 @@ err_ioremap:
 	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
 	free_netdev(netdev);
 err_alloc_etherdev:
-	pci_disable_pcie_error_reporting(pdev);
 	pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
@@ -11326,8 +11336,6 @@ static void ixgbe_remove(struct pci_dev *pdev)
 	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
 	free_netdev(netdev);
 
-	pci_disable_pcie_error_reporting(pdev);
-
 	if (disable_dev)
 		pci_disable_device(pdev);
 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 24aa97f993ca..689470c1e8ad 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -680,14 +680,14 @@ static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
 }
 
 /**
- * ixgbe_mii_bus_read_generic - Read a clause 22/45 register with gssr flags
+ * ixgbe_mii_bus_read_generic_c22 - Read a clause 22 register with gssr flags
  * @hw: pointer to hardware structure
  * @addr: address
  * @regnum: register number
  * @gssr: semaphore flags to acquire
 **/
-static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr,
-				      int regnum, u32 gssr)
+static s32 ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr,
+					  int regnum, u32 gssr)
 {
 	u32 hwaddr, cmd;
 	s32 data;
@@ -696,31 +696,52 @@ static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr,
 		return -EBUSY;
 
 	hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
-	if (regnum & MII_ADDR_C45) {
-		hwaddr |= regnum & GENMASK(21, 0);
-		cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
-	} else {
-		hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
-		cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL |
-			IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND;
-	}
+	hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
+	cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL |
+	      IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND;
 
 	data = ixgbe_msca_cmd(hw, cmd);
 	if (data < 0)
 		goto mii_bus_read_done;
 
-	/* For a clause 45 access the address cycle just completed, we still
-	 * need to do the read command, otherwise just get the data
-	 */
-	if (!(regnum & MII_ADDR_C45))
-		goto do_mii_bus_read;
+	data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+	data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0);
+
+mii_bus_read_done:
+	hw->mac.ops.release_swfw_sync(hw, gssr);
+	return data;
+}
+
+/**
+ * ixgbe_mii_bus_read_generic_c45 - Read a clause 45 register with gssr flags
+ * @hw: pointer to hardware structure
+ * @addr: address
+ * @devad: device address to read
+ * @regnum: register number
+ * @gssr: semaphore flags to acquire
+ **/
+static s32 ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr,
+					  int devad, int regnum, u32 gssr)
+{
+	u32 hwaddr, cmd;
+	s32 data;
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+		return -EBUSY;
+
+	hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
+	hwaddr |= devad << 16 | regnum;
+	cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
+
+	data = ixgbe_msca_cmd(hw, cmd);
+	if (data < 0)
+		goto mii_bus_read_done;
 
 	cmd = hwaddr | IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND;
 	data = ixgbe_msca_cmd(hw, cmd);
 	if (data < 0)
 		goto mii_bus_read_done;
 
-do_mii_bus_read:
 	data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
 	data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0);
 
@@ -730,15 +751,15 @@ mii_bus_read_done:
 }
 
 /**
- * ixgbe_mii_bus_write_generic - Write a clause 22/45 register with gssr flags
+ * ixgbe_mii_bus_write_generic_c22 - Write a clause 22 register with gssr flags
  * @hw: pointer to hardware structure
  * @addr: address
  * @regnum: register number
  * @val: value to write
  * @gssr: semaphore flags to acquire
 **/
-static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr,
-				       int regnum, u16 val, u32 gssr)
+static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
+					   int regnum, u16 val, u32 gssr)
 {
 	u32 hwaddr, cmd;
 	s32 err;
@@ -749,20 +770,43 @@ static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr,
 	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val);
 
 	hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
-	if (regnum & MII_ADDR_C45) {
-		hwaddr |= regnum & GENMASK(21, 0);
-		cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
-	} else {
-		hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
-		cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
-			IXGBE_MSCA_MDI_COMMAND;
-	}
+	hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
+	cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
+	      IXGBE_MSCA_MDI_COMMAND;
+
+	err = ixgbe_msca_cmd(hw, cmd);
+
+	hw->mac.ops.release_swfw_sync(hw, gssr);
+	return err;
+}
+
+/**
+ * ixgbe_mii_bus_write_generic_c45 - Write a clause 45 register with gssr flags
+ * @hw: pointer to hardware structure
+ * @addr: address
+ * @devad: device address to write
+ * @regnum: register number
+ * @val: value to write
+ * @gssr: semaphore flags to acquire
+ **/
+static s32 ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr,
+					   int devad, int regnum, u16 val,
+					   u32 gssr)
+{
+	u32 hwaddr, cmd;
+	s32 err;
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+		return -EBUSY;
+
+	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val);
+
+	hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
+	hwaddr |= devad << 16 | regnum;
+	cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
 
-	/* For clause 45 this is an address cycle, for clause 22 this is the
-	 * entire transaction
-	 */
 	err = ixgbe_msca_cmd(hw, cmd);
-	if (err < 0 || !(regnum & MII_ADDR_C45))
+	if (err < 0)
 		goto mii_bus_write_done;
 
 	cmd = hwaddr | IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND;
@@ -774,70 +818,144 @@ mii_bus_write_done:
 }
 
 /**
- * ixgbe_mii_bus_read - Read a clause 22/45 register
+ * ixgbe_mii_bus_read_c22 - Read a clause 22 register
  * @bus: pointer to mii_bus structure which points to our driver private
  * @addr: address
  * @regnum: register number
 **/
-static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
+static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
 {
 	struct ixgbe_adapter *adapter = bus->priv;
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 gssr = hw->phy.phy_semaphore_mask;
 
-	return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
+	return ixgbe_mii_bus_read_generic_c22(hw, addr, regnum, gssr);
 }
 
 /**
- * ixgbe_mii_bus_write - Write a clause 22/45 register
+ * ixgbe_mii_bus_read_c45 - Read a clause 45 register
  * @bus: pointer to mii_bus structure which points to our driver private
+ * @devad: device address to read
  * @addr: address
  * @regnum: register number
+ **/
+static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
+				  int regnum)
+{
+	struct ixgbe_adapter *adapter = bus->priv;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 gssr = hw->phy.phy_semaphore_mask;
+
+	return ixgbe_mii_bus_read_generic_c45(hw, addr, devad, regnum, gssr);
+}
+
+/**
+ * ixgbe_mii_bus_write_c22 - Write a clause 22 register
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @regnum: register number
+ * @val: value to write
+ **/
+static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
+				   u16 val)
+{
+	struct ixgbe_adapter *adapter = bus->priv;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 gssr = hw->phy.phy_semaphore_mask;
+
+	return ixgbe_mii_bus_write_generic_c22(hw, addr, regnum, val, gssr);
+}
+
+/**
+ * ixgbe_mii_bus_write_c45 - Write a clause 45 register
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @devad: device address to write
+ * @regnum: register number
  * @val: value to write
 **/
-static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum,
-			       u16 val)
+static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
+				   int regnum, u16 val)
 {
 	struct ixgbe_adapter *adapter = bus->priv;
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 gssr = hw->phy.phy_semaphore_mask;
 
-	return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
+	return ixgbe_mii_bus_write_generic_c45(hw, addr, devad, regnum, val,
+					       gssr);
 }
 
 /**
- * ixgbe_x550em_a_mii_bus_read - Read a clause 22/45 register on x550em_a
+ * ixgbe_x550em_a_mii_bus_read_c22 - Read a clause 22 register on x550em_a
  * @bus: pointer to mii_bus structure which points to our driver private
  * @addr: address
  * @regnum: register number
 **/
-static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr,
-				       int regnum)
+static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
+					   int regnum)
 {
 	struct ixgbe_adapter *adapter = bus->priv;
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 gssr = hw->phy.phy_semaphore_mask;
 
 	gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
-	return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
+	return ixgbe_mii_bus_read_generic_c22(hw, addr, regnum, gssr);
 }
 
 /**
- * ixgbe_x550em_a_mii_bus_write - Write a clause 22/45 register on x550em_a
+ * ixgbe_x550em_a_mii_bus_read_c45 - Read a clause 45 register on x550em_a
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @devad: device address to read
+ * @regnum: register number
+ **/
+static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
+					   int devad, int regnum)
+{
+	struct ixgbe_adapter *adapter = bus->priv;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 gssr = hw->phy.phy_semaphore_mask;
+
+	gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
+	return ixgbe_mii_bus_read_generic_c45(hw, addr, devad, regnum, gssr);
+}
+
+/**
+ * ixgbe_x550em_a_mii_bus_write_c22 - Write a clause 22 register on x550em_a
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @regnum: register number
+ * @val: value to write
+ **/
+static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
+					    int regnum, u16 val)
 {
 	struct ixgbe_adapter *adapter = bus->priv;
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 gssr = hw->phy.phy_semaphore_mask;
 
 	gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
-	return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
+	return ixgbe_mii_bus_write_generic_c22(hw, addr, regnum, val, gssr);
+}
+
+/**
+ * ixgbe_x550em_a_mii_bus_write_c45 - Write a clause 45 register on x550em_a
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @devad: device address to write
+ * @regnum: register number
+ * @val: value to write
+ **/
+static s32 ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr,
+					    int devad, int regnum, u16 val)
+{
+	struct ixgbe_adapter *adapter = bus->priv;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 gssr = hw->phy.phy_semaphore_mask;
+
+	gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
+	return ixgbe_mii_bus_write_generic_c45(hw, addr, devad, regnum, val,
+					       gssr);
 }
 
 /**
@@ -855,9 +973,11 @@ static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn)
 	rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
 	if (rp_pdev && rp_pdev->subordinate) {
 		bus = rp_pdev->subordinate->number;
+		pci_dev_put(rp_pdev);
 		return pci_get_domain_bus_and_slot(0, bus, 0);
 	}
 
+	pci_dev_put(rp_pdev);
 	return NULL;
 }
 
@@ -874,6 +994,7 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
 	struct ixgbe_adapter *adapter = hw->back;
 	struct pci_dev *pdev = adapter->pdev;
 	struct pci_dev *func0_pdev;
+	bool has_mii = false;
 
 	/* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices
 	 * are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0
@@ -884,15 +1005,16 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
 	func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0));
 	if (func0_pdev) {
 		if (func0_pdev == pdev)
-			return true;
-		else
-			return false;
+			has_mii = true;
+		goto out;
 	}
 
 	func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0));
 	if (func0_pdev == pdev)
-		return true;
+		has_mii = true;
 
-	return false;
+out:
+	pci_dev_put(func0_pdev);
+	return has_mii;
 }
 
 /**
@@ -905,8 +1027,11 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
 **/
 s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
 {
-	s32 (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
-	s32 (*read)(struct mii_bus *bus, int addr, int regnum);
+	s32 (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val);
+	s32 (*read_c22)(struct mii_bus *bus, int addr, int regnum);
+	s32 (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum,
+			 u16 val);
+	s32 (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum);
 	struct ixgbe_adapter *adapter = hw->back;
 	struct pci_dev *pdev = adapter->pdev;
 	struct device *dev = &adapter->netdev->dev;
@@ -925,12 +1050,16 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
 	case IXGBE_DEV_ID_X550EM_A_1G_T_L:
 		if (!ixgbe_x550em_a_has_mii(hw))
 			return 0;
-		read = &ixgbe_x550em_a_mii_bus_read;
-		write = &ixgbe_x550em_a_mii_bus_write;
+		read_c22 = ixgbe_x550em_a_mii_bus_read_c22;
+		write_c22 = ixgbe_x550em_a_mii_bus_write_c22;
+		read_c45 = ixgbe_x550em_a_mii_bus_read_c45;
+		write_c45 = ixgbe_x550em_a_mii_bus_write_c45;
 		break;
 	default:
-		read = &ixgbe_mii_bus_read;
-		write = &ixgbe_mii_bus_write;
+		read_c22 = ixgbe_mii_bus_read_c22;
+		write_c22 = ixgbe_mii_bus_write_c22;
+		read_c45 = ixgbe_mii_bus_read_c45;
+		write_c45 = ixgbe_mii_bus_write_c45;
 		break;
 	}
 
@@ -938,8 +1067,10 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
 	if (!bus)
 		return -ENOMEM;
 
-	bus->read = read;
-	bus->write = write;
+	bus->read = read_c22;
+	bus->write = write_c22;
+	bus->read_c45 = read_c45;
+	bus->write_c45 = write_c45;
 
 	/* Use the position of the device in the PCI hierarchy as the id */
 	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index c1cf540d162a..66cf17f19408 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -257,8 +257,10 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
 /**
  * ixgbevf_ipsec_add_sa - program device with a security association
  * @xs: pointer to transformer state struct
+ * @extack: extack pointer to fill failure reason
 **/
-static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
+static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
+				struct netlink_ext_ack *extack)
 {
 	struct net_device *dev = xs->xso.real_dev;
 	struct ixgbevf_adapter *adapter;
@@ -270,18 +272,17 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
 	ipsec = adapter->ipsec;
 
 	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
-		netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
-			   xs->id.proto);
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for IPsec offload");
 		return -EINVAL;
 	}
 
 	if (xs->props.mode != XFRM_MODE_TRANSPORT) {
-		netdev_err(dev, "Unsupported mode for ipsec offload\n");
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for ipsec offload");
 		return -EINVAL;
 	}
 
 	if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
-		netdev_err(dev, "Unsupported ipsec offload type\n");
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported ipsec offload type");
 		return -EINVAL;
 	}
 
@@ -289,14 +290,14 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
 		struct rx_sa rsa;
 
 		if (xs->calg) {
-			netdev_err(dev, "Compression offload not supported\n");
+			NL_SET_ERR_MSG_MOD(extack, "Compression offload not supported");
 			return -EINVAL;
 		}
 
 		/* find the first unused index */
 		ret = ixgbevf_ipsec_find_empty_idx(ipsec, true);
 		if (ret < 0) {
-			netdev_err(dev, "No space for SA in Rx table!\n");
+			NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx table!");
 			return ret;
 		}
 		sa_idx = (u16)ret;
@@ -311,7 +312,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
 		/* get the key and salt */
 		ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
 		if (ret) {
-			netdev_err(dev, "Failed to get key data for Rx SA table\n");
+			NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
 			return ret;
 		}
 
@@ -350,7 +351,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
 		/* find the first unused index */
 		ret = ixgbevf_ipsec_find_empty_idx(ipsec, false);
 		if (ret < 0) {
-			netdev_err(dev, "No space for SA in Tx table\n");
+			NL_SET_ERR_MSG_MOD(extack, "No space for SA in Tx table");
 			return ret;
 		}
 		sa_idx = (u16)ret;
@@ -364,7 +365,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
 
 		ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
 		if (ret) {
-			netdev_err(dev, "Failed to get key data for Tx SA table\n");
+			NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
 			memset(&tsa, 0, sizeof(tsa));
 			return ret;
 		}
table"); memset(&tsa, 0, sizeof(tsa)); return ret; } diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index ea0a230c1153..a44e4bd56142 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4634,6 +4634,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_HW_VLAN_CTAG_TX; netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->xdp_features = NETDEV_XDP_ACT_BASIC; /* MTU range: 68 - 1504 or 9710 */ netdev->min_mtu = ETH_MIN_MTU; |