Diffstat (limited to 'drivers/net/ethernet/intel')
86 files changed, 5224 insertions, 2922 deletions
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 551de8c2fef2..f703fa58458e 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -3019,7 +3019,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, * applicable for weak-ordered memory model archs, * such as IA-64). */ - wmb(); + dma_wmb(); tx_ring->next_to_use = i; } @@ -4540,7 +4540,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, * applicable for weak-ordered memory model archs, * such as IA-64). */ - wmb(); + dma_wmb(); writel(i, adapter->hw.hw_addr + rx_ring->rdt); } } @@ -4655,7 +4655,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, * applicable for weak-ordered memory model archs, * such as IA-64). */ - wmb(); + dma_wmb(); writel(i, hw->hw_addr + rx_ring->rdt); } } diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index f86d55657959..4b103cca8a39 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -680,7 +680,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) ew32(TCTL, E1000_TCTL_PSP); e1e_flush(); - usleep_range(10000, 20000); + usleep_range(10000, 11000); ctrl = er32(CTRL); diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index b9309302c29e..2c1bab377b2a 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -959,7 +959,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) ew32(TCTL, tctl); e1e_flush(); - usleep_range(10000, 20000); + usleep_range(10000, 11000); /* Must acquire the MDIO ownership before MAC reset. * Ownership defaults to firmware after a reset. 
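The usleep_range(10000, 20000) -> usleep_range(10000, 11000) changes above (and throughout the rest of this series) only tighten the allowed timer slack so the worst-case delay stays near 10 ms; the functional change in e1000_main.c is the barrier swap. wmb() orders all prior memory writes, while dma_wmb() orders only writes to DMA-coherent memory against later device-visible accesses, which is all the driver needs before the tail write that publishes new descriptors, and it is cheaper on weakly ordered architectures (e.g. IA-64, arm64). A minimal sketch of that producer pattern, using hypothetical ring/descriptor/tail names rather than the real e1000 structures:

/*
 * Sketch of the descriptor-publish ordering the wmb() -> dma_wmb()
 * conversions rely on. demo_ring/demo_desc are illustrative stand-ins.
 */
#include <linux/io.h>
#include <linux/types.h>

struct demo_desc {
	__le64 addr;
	__le32 status;
};

struct demo_ring {
	struct demo_desc *desc;		/* coherent DMA memory */
	dma_addr_t *buf_dma;		/* per-buffer DMA addresses */
	void __iomem *tail_reg;		/* MMIO tail doorbell */
};

static void demo_publish_rx_desc(struct demo_ring *ring, u16 i)
{
	struct demo_desc *desc = &ring->desc[i];

	desc->addr = cpu_to_le64(ring->buf_dma[i]);
	desc->status = 0;

	/*
	 * Force the descriptor writes to complete before letting the
	 * hardware see the new tail. dma_wmb() only orders stores to
	 * DMA-coherent memory against subsequent device accesses,
	 * which is sufficient here and cheaper than a full wmb().
	 */
	dma_wmb();

	/* hand the descriptor to hardware */
	writel(i, ring->tail_reg);
}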
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index fd550dee4982..63c3c79380a1 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -222,6 +222,9 @@ #define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ #define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */ +/* PCIm function state */ +#define E1000_STATUS_PCIM_STATE 0x40000000 + #define HALF_DUPLEX 1 #define FULL_DUPLEX 2 diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index be13227f1697..34cd67951aec 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -186,12 +186,13 @@ struct e1000_phy_regs { /* board specific private data structure */ struct e1000_adapter { - struct timer_list watchdog_timer; struct timer_list phy_info_timer; struct timer_list blink_timer; struct work_struct reset_task; - struct work_struct watchdog_task; + struct delayed_work watchdog_task; + + struct workqueue_struct *e1000_workqueue; const struct e1000_info *ei; diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 02ebf208f48b..08342698386d 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1014,7 +1014,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) /* Disable all the interrupts */ ew32(IMC, 0xFFFFFFFF); e1e_flush(); - usleep_range(10000, 20000); + usleep_range(10000, 11000); /* Test each interrupt */ for (i = 0; i < 10; i++) { @@ -1046,7 +1046,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) ew32(IMC, mask); ew32(ICS, mask); e1e_flush(); - usleep_range(10000, 20000); + usleep_range(10000, 11000); if (adapter->test_icr & mask) { *data = 3; @@ -1064,7 +1064,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) ew32(IMS, mask); ew32(ICS, mask); e1e_flush(); - usleep_range(10000, 20000); + usleep_range(10000, 11000); if (!(adapter->test_icr & mask)) { *data = 4; @@ -1082,7 +1082,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) ew32(IMC, ~mask & 0x00007FFF); ew32(ICS, ~mask & 0x00007FFF); e1e_flush(); - usleep_range(10000, 20000); + usleep_range(10000, 11000); if (adapter->test_icr) { *data = 5; @@ -1094,7 +1094,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) /* Disable all the interrupts */ ew32(IMC, 0xFFFFFFFF); e1e_flush(); - usleep_range(10000, 20000); + usleep_range(10000, 11000); /* Unhook test interrupt handler */ free_irq(irq, netdev); @@ -1470,7 +1470,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) */ ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK); e1e_flush(); - usleep_range(10000, 20000); + usleep_range(10000, 11000); return 0; } @@ -1584,7 +1584,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) hw->phy.media_type == e1000_media_type_internal_serdes) { ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); e1e_flush(); - usleep_range(10000, 20000); + usleep_range(10000, 11000); break; } /* Fall Through */ diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index cdae0efde8e6..395b05701480 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -271,7 +271,7 @@ static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw) u16 count = 20; 
do { - usleep_range(5000, 10000); + usleep_range(5000, 6000); } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--); msleep(30); @@ -405,7 +405,7 @@ out: /* Ungate automatic PHY configuration on non-managed 82579 */ if ((hw->mac.type == e1000_pch2lan) && !(fwsm & E1000_ICH_FWSM_FW_VALID)) { - usleep_range(10000, 20000); + usleep_range(10000, 11000); e1000_gate_hw_phy_config_ich8lan(hw, false); } @@ -531,7 +531,7 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) phy->id = 0; while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && (i++ < 100)) { - usleep_range(1000, 2000); + usleep_range(1000, 1100); ret_val = e1000e_get_phy_id(hw); if (ret_val) return ret_val; @@ -1244,7 +1244,7 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) goto out; } - usleep_range(10000, 20000); + usleep_range(10000, 11000); } e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10); @@ -1999,7 +1999,7 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) && (i++ < 30)) - usleep_range(10000, 20000); + usleep_range(10000, 11000); return blocked ? E1000_BLK_PHY_RESET : 0; } @@ -2818,7 +2818,7 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) return 0; /* Allow time for h/w to get to quiescent state after reset */ - usleep_range(10000, 20000); + usleep_range(10000, 11000); /* Perform any necessary post-reset workarounds */ switch (hw->mac.type) { @@ -2854,7 +2854,7 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) if (hw->mac.type == e1000_pch2lan) { /* Ungate automatic PHY configuration on non-managed 82579 */ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { - usleep_range(10000, 20000); + usleep_range(10000, 11000); e1000_gate_hw_phy_config_ich8lan(hw, false); } @@ -3875,7 +3875,7 @@ release: */ if (!ret_val) { nvm->ops.reload(hw); - usleep_range(10000, 20000); + usleep_range(10000, 11000); } out: @@ -4026,7 +4026,7 @@ release: */ if (!ret_val) { nvm->ops.reload(hw); - usleep_range(10000, 20000); + usleep_range(10000, 11000); } out: @@ -4650,7 +4650,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) ew32(TCTL, E1000_TCTL_PSP); e1e_flush(); - usleep_range(10000, 20000); + usleep_range(10000, 11000); /* Workaround for ICH8 bit corruption issue in FIFO memory */ if (hw->mac.type == e1000_ich8lan) { diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index 4abd55d646c5..e531976f8a67 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -797,7 +797,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) * milliseconds even if the other end is doing it in SW). 
*/ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { - usleep_range(10000, 20000); + usleep_range(10000, 11000); status = er32(STATUS); if (status & E1000_STATUS_LU) break; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 0e09bede42a2..e4baa13b3cda 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1780,7 +1780,8 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies + 1); + queue_delayed_work(adapter->e1000_workqueue, + &adapter->watchdog_task, 1); } /* Reset on uncorrectable ECC error */ @@ -1860,7 +1861,8 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies + 1); + queue_delayed_work(adapter->e1000_workqueue, + &adapter->watchdog_task, 1); } /* Reset on uncorrectable ECC error */ @@ -1905,7 +1907,8 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies + 1); + queue_delayed_work(adapter->e1000_workqueue, + &adapter->watchdog_task, 1); } if (!test_bit(__E1000_DOWN, &adapter->state)) @@ -3208,7 +3211,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) ew32(RCTL, rctl & ~E1000_RCTL_EN); e1e_flush(); - usleep_range(10000, 20000); + usleep_range(10000, 11000); if (adapter->flags2 & FLAG2_DMA_BURST) { /* set the writeback threshold (only takes effect if the RDTR @@ -4046,12 +4049,12 @@ void e1000e_reset(struct e1000_adapter *adapter) case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: - fc->refresh_time = 0x0400; + fc->refresh_time = 0xFFFF; + fc->pause_time = 0xFFFF; if (adapter->netdev->mtu <= ETH_DATA_LEN) { fc->high_water = 0x05C20; fc->low_water = 0x05048; - fc->pause_time = 0x0650; break; } @@ -4208,7 +4211,7 @@ void e1000e_up(struct e1000_adapter *adapter) e1000_configure_msix(adapter); e1000_irq_enable(adapter); - netif_start_queue(adapter->netdev); + /* Tx queue started by watchdog timer when link is up */ e1000e_trigger_lsc(adapter); } @@ -4272,13 +4275,12 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) /* flush both disables and wait for them to finish */ e1e_flush(); - usleep_range(10000, 20000); + usleep_range(10000, 11000); e1000_irq_disable(adapter); napi_synchronize(&adapter->napi); - del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); spin_lock(&adapter->stats64_lock); @@ -4310,7 +4312,7 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter) { might_sleep(); while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) - usleep_range(1000, 2000); + usleep_range(1000, 1100); e1000e_down(adapter, true); e1000e_up(adapter); clear_bit(__E1000_RESETTING, &adapter->state); @@ -4606,6 +4608,7 @@ int e1000e_open(struct net_device *netdev) pm_runtime_get_sync(&pdev->dev); netif_carrier_off(netdev); + netif_stop_queue(netdev); /* allocate transmit descriptors */ err = e1000e_setup_tx_resources(adapter->tx_ring); @@ -4666,7 +4669,6 @@ int e1000e_open(struct net_device *netdev) e1000_irq_enable(adapter); adapter->tx_hang_recheck = false; - 
netif_start_queue(netdev); hw->mac.get_link_status = true; pm_runtime_put(&pdev->dev); @@ -4707,7 +4709,7 @@ int e1000e_close(struct net_device *netdev) int count = E1000_CHECK_RESET_COUNT; while (test_bit(__E1000_RESETTING, &adapter->state) && count--) - usleep_range(10000, 20000); + usleep_range(10000, 11000); WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); @@ -5150,31 +5152,18 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) } } -/** - * e1000_watchdog - Timer Call-back - * @data: pointer to adapter cast into an unsigned long - **/ -static void e1000_watchdog(struct timer_list *t) -{ - struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer); - - /* Do the rest outside of interrupt context */ - schedule_work(&adapter->watchdog_task); - - /* TODO: make this use queue_delayed_work() */ -} - static void e1000_watchdog_task(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, - watchdog_task); + watchdog_task.work); struct net_device *netdev = adapter->netdev; struct e1000_mac_info *mac = &adapter->hw.mac; struct e1000_phy_info *phy = &adapter->hw.phy; struct e1000_ring *tx_ring = adapter->tx_ring; + u32 dmoff_exit_timeout = 100, tries = 0; struct e1000_hw *hw = &adapter->hw; - u32 link, tctl; + u32 link, tctl, pcim_state; if (test_bit(__E1000_DOWN, &adapter->state)) return; @@ -5199,6 +5188,21 @@ static void e1000_watchdog_task(struct work_struct *work) /* Cancel scheduled suspend requests. */ pm_runtime_resume(netdev->dev.parent); + /* Checking if MAC is in DMoff state*/ + pcim_state = er32(STATUS); + while (pcim_state & E1000_STATUS_PCIM_STATE) { + if (tries++ == dmoff_exit_timeout) { + e_dbg("Error in exiting dmoff\n"); + break; + } + usleep_range(10000, 20000); + pcim_state = er32(STATUS); + + /* Checking if MAC exited DMoff state */ + if (!(pcim_state & E1000_STATUS_PCIM_STATE)) + e1000_phy_hw_reset(&adapter->hw); + } + /* update snapshot of PHY registers on LSC */ e1000_phy_read_status(adapter); mac->ops.get_link_up_info(&adapter->hw, @@ -5288,6 +5292,7 @@ static void e1000_watchdog_task(struct work_struct *work) if (phy->ops.cfg_on_link_up) phy->ops.cfg_on_link_up(hw); + netif_wake_queue(netdev); netif_carrier_on(netdev); if (!test_bit(__E1000_DOWN, &adapter->state)) @@ -5301,6 +5306,7 @@ static void e1000_watchdog_task(struct work_struct *work) /* Link status message must follow this format */ pr_info("%s NIC Link is Down\n", adapter->netdev->name); netif_carrier_off(netdev); + netif_stop_queue(netdev); if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); @@ -5308,13 +5314,8 @@ static void e1000_watchdog_task(struct work_struct *work) /* 8000ES2LAN requires a Rx packet buffer work-around * on link down event; reset the controller to flush * the Rx packet buffer. - * - * If the link is lost the controller stops DMA, but - * if there is queued Tx work it cannot be done. So - * reset the controller to flush the Tx packet buffers. */ - if ((adapter->flags & FLAG_RX_NEEDS_RESTART) || - e1000_desc_unused(tx_ring) + 1 < tx_ring->count) + if (adapter->flags & FLAG_RX_NEEDS_RESTART) adapter->flags |= FLAG_RESTART_NOW; else pm_schedule_suspend(netdev->dev.parent, @@ -5337,6 +5338,14 @@ link_up: adapter->gotc_old = adapter->stats.gotc; spin_unlock(&adapter->stats64_lock); + /* If the link is lost the controller stops DMA, but + * if there is queued Tx work it cannot be done. 
So + * reset the controller to flush the Tx packet buffers. + */ + if (!netif_carrier_ok(netdev) && + (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) + adapter->flags |= FLAG_RESTART_NOW; + /* If reset is necessary, do it outside of interrupt context. */ if (adapter->flags & FLAG_RESTART_NOW) { schedule_work(&adapter->reset_task); @@ -5395,8 +5404,9 @@ link_up: /* Reset the timer */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + 2 * HZ)); + queue_delayed_work(adapter->e1000_workqueue, + &adapter->watchdog_task, + round_jiffies(2 * HZ)); } #define E1000_TX_FLAGS_CSUM 0x00000001 @@ -6016,7 +6026,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) } while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) - usleep_range(1000, 2000); + usleep_range(1000, 1100); /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ adapter->max_frame_size = max_frame; e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); @@ -6296,7 +6306,7 @@ static int e1000e_pm_freeze(struct device *dev) int count = E1000_CHECK_RESET_COUNT; while (test_bit(__E1000_RESETTING, &adapter->state) && count--) - usleep_range(10000, 20000); + usleep_range(10000, 11000); WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); @@ -6711,7 +6721,7 @@ static int e1000e_pm_runtime_suspend(struct device *dev) int count = E1000_CHECK_RESET_COUNT; while (test_bit(__E1000_RESETTING, &adapter->state) && count--) - usleep_range(10000, 20000); + usleep_range(10000, 11000); WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); @@ -7251,11 +7261,21 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_eeprom; } - timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0); + adapter->e1000_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, + e1000e_driver_name); + + if (!adapter->e1000_workqueue) { + err = -ENOMEM; + goto err_workqueue; + } + + INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task); + queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task, + 0); + timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0); INIT_WORK(&adapter->reset_task, e1000_reset_task); - INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); @@ -7349,6 +7369,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; err_register: + flush_workqueue(adapter->e1000_workqueue); + destroy_workqueue(adapter->e1000_workqueue); +err_workqueue: if (!(adapter->flags & FLAG_HAS_AMT)) e1000e_release_hw_control(adapter); err_eeprom: @@ -7395,15 +7418,17 @@ static void e1000_remove(struct pci_dev *pdev) */ if (!down) set_bit(__E1000_DOWN, &adapter->state); - del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); cancel_work_sync(&adapter->reset_task); - cancel_work_sync(&adapter->watchdog_task); cancel_work_sync(&adapter->downshift_task); cancel_work_sync(&adapter->update_phy_task); cancel_work_sync(&adapter->print_hang_task); + cancel_delayed_work(&adapter->watchdog_task); + flush_workqueue(adapter->e1000_workqueue); + destroy_workqueue(adapter->e1000_workqueue); + if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { cancel_work_sync(&adapter->tx_hwtstamp_work); if (adapter->tx_hwtstamp_skb) { diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c 
b/drivers/net/ethernet/intel/e1000e/nvm.c index 937f9af22d26..e609f4df86f4 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -392,7 +392,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) break; } } - usleep_range(10000, 20000); + usleep_range(10000, 11000); nvm->ops.release(hw); } diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 7ce42040b851..84bd06901014 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -27,6 +27,7 @@ #include <net/ip6_checksum.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> +#include <linux/if_macvlan.h> #include <linux/if_bridge.h> #include <linux/clocksource.h> #include <linux/net_tstamp.h> @@ -295,8 +296,6 @@ struct i40e_cloud_filter { u8 tunnel_type; }; -#define I40E_ETH_P_LLDP 0x88cc - #define I40E_DCB_PRIO_TYPE_STRICT 0 #define I40E_DCB_PRIO_TYPE_ETS 1 #define I40E_DCB_STRICT_PRIO_CREDITS 127 @@ -414,6 +413,11 @@ struct i40e_flex_pit { u8 pit_index; }; +struct i40e_fwd_adapter { + struct net_device *netdev; + int bit_no; +}; + struct i40e_channel { struct list_head list; bool initialized; @@ -428,11 +432,25 @@ struct i40e_channel { struct i40e_aqc_vsi_properties_data info; u64 max_tx_rate; + struct i40e_fwd_adapter *fwd; /* track this channel belongs to which VSI */ struct i40e_vsi *parent_vsi; }; +static inline bool i40e_is_channel_macvlan(struct i40e_channel *ch) +{ + return !!ch->fwd; +} + +static inline u8 *i40e_channel_mac(struct i40e_channel *ch) +{ + if (i40e_is_channel_macvlan(ch)) + return ch->fwd->netdev->dev_addr; + else + return NULL; +} + /* struct that defines the Ethernet device */ struct i40e_pf { struct pci_dev *pdev; @@ -777,7 +795,8 @@ struct i40e_vsi { u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */ u16 req_queue_pairs; /* User requested queue pairs */ u16 num_queue_pairs; /* Used tx and rx pairs */ - u16 num_desc; + u16 num_tx_desc; + u16 num_rx_desc; enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */ s16 vf_id; /* Virtual function ID for SRIOV VSIs */ @@ -814,6 +833,13 @@ struct i40e_vsi { struct list_head ch_list; u16 tc_seid_map[I40E_MAX_TRAFFIC_CLASS]; + /* macvlan fields */ +#define I40E_MAX_MACVLANS 128 /* Max HW vectors - 1 on FVL */ +#define I40E_MIN_MACVLAN_VECTORS 2 /* Min vectors to enable macvlans */ + DECLARE_BITMAP(fwd_bitmask, I40E_MAX_MACVLANS); + struct list_head macvlan_list; + int macvlan_cnt; + void *priv; /* client driver data reference. 
*/ /* VSI specific handlers */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 243dcd4bec19..814acbe79ffd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -675,7 +675,7 @@ static u16 i40e_clean_asq(struct i40e_hw *hw) desc = I40E_ADMINQ_DESC(*asq, ntc); details = I40E_ADMINQ_DETAILS(*asq, ntc); while (rd32(hw, hw->aq.asq.head) != ntc) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); if (details->callback) { @@ -835,7 +835,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, } /* bump the tail */ - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); + i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n"); i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff, buff_size); (hw->aq.asq.next_to_use)++; @@ -886,7 +886,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; } - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer writeback:\n"); i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); @@ -995,7 +995,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, e->msg_len); - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); + i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n"); i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, hw->aq.arq_buf_size); diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index ecb1adaa54ec..906cf68d3453 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -281,47 +281,49 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, void *buffer, u16 buf_len) { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; + u32 effective_mask = hw->debug_mask & mask; + char prefix[27]; u16 len; u8 *buf = (u8 *)buffer; - if ((!(mask & hw->debug_mask)) || (desc == NULL)) + if (!effective_mask || !desc) return; len = le16_to_cpu(aq_desc->datalen); - i40e_debug(hw, mask, + i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", le16_to_cpu(aq_desc->opcode), le16_to_cpu(aq_desc->flags), le16_to_cpu(aq_desc->datalen), le16_to_cpu(aq_desc->retval)); - i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", + i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, + "\tcookie (h,l) 0x%08X 0x%08X\n", le32_to_cpu(aq_desc->cookie_high), le32_to_cpu(aq_desc->cookie_low)); - i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", + i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, + "\tparam (0,1) 0x%08X 0x%08X\n", le32_to_cpu(aq_desc->params.internal.param0), le32_to_cpu(aq_desc->params.internal.param1)); - i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", + i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR, + "\taddr (h,l) 0x%08X 0x%08X\n", le32_to_cpu(aq_desc->params.external.addr_high), le32_to_cpu(aq_desc->params.external.addr_low)); - if ((buffer != NULL) && (aq_desc->datalen != 0)) { + if (buffer && buf_len != 0 && len != 0 && + (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) { i40e_debug(hw, mask, "AQ CMD Buffer:\n"); if (buf_len < len) len = buf_len; - /* write the full 16-byte chunks */ - if 
(hw->debug_mask & mask) { - char prefix[27]; - - snprintf(prefix, sizeof(prefix), - "i40e %02x:%02x.%x: \t0x", - hw->bus.bus_id, - hw->bus.device, - hw->bus.func); - - print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, - 16, 1, buf, len, false); - } + + snprintf(prefix, sizeof(prefix), + "i40e %02x:%02x.%x: \t0x", + hw->bus.bus_id, + hw->bus.device, + hw->bus.func); + + print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, + 16, 1, buf, len, false); } } @@ -1859,8 +1861,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; - if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && - hw->aq.api_min_ver >= 7) { + if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { __le32 tmp; memcpy(&tmp, resp->link_type, sizeof(tmp)); diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 7ea4f09229e4..55d20acfcf70 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -333,8 +333,9 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) " seid = %d, id = %d, uplink_seid = %d\n", vsi->seid, vsi->id, vsi->uplink_seid); dev_info(&pf->pdev->dev, - " base_queue = %d, num_queue_pairs = %d, num_desc = %d\n", - vsi->base_queue, vsi->num_queue_pairs, vsi->num_desc); + " base_queue = %d, num_queue_pairs = %d, num_tx_desc = %d, num_rx_desc = %d\n", + vsi->base_queue, vsi->num_queue_pairs, vsi->num_tx_desc, + vsi->num_rx_desc); dev_info(&pf->pdev->dev, " type = %i\n", vsi->type); if (vsi->type == I40E_VSI_SRIOV) dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id); @@ -1330,7 +1331,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, pf->hw.mac.addr, - I40E_ETH_P_LLDP, 0, + ETH_P_LLDP, 0, pf->vsi[pf->lan_vsi]->seid, 0, true, NULL, NULL); if (ret) { @@ -1348,7 +1349,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, pf->hw.mac.addr, - I40E_ETH_P_LLDP, 0, + ETH_P_LLDP, 0, pf->vsi[pf->lan_vsi]->seid, 0, false, NULL, NULL); if (ret) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 7545b21bee3c..527eb52c5401 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1982,6 +1982,8 @@ static int i40e_set_ringparam(struct net_device *netdev, if (i40e_enabled_xdp_vsi(vsi)) vsi->xdp_rings[i]->count = new_tx_count; } + vsi->num_tx_desc = new_tx_count; + vsi->num_rx_desc = new_rx_count; goto done; } @@ -2118,6 +2120,8 @@ rx_unwind: rx_rings = NULL; } + vsi->num_tx_desc = new_tx_count; + vsi->num_rx_desc = new_rx_count; i40e_up(vsi); free_tx: @@ -4852,9 +4856,12 @@ static u32 i40e_get_priv_flags(struct net_device *dev) static int i40e_set_priv_flags(struct net_device *dev, u32 flags) { struct i40e_netdev_priv *np = netdev_priv(dev); + u64 orig_flags, new_flags, changed_flags; + enum i40e_admin_queue_err adq_err; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - u64 orig_flags, new_flags, changed_flags; + bool is_reset_needed; + i40e_status status; u32 i, j; orig_flags = READ_ONCE(pf->flags); @@ -4898,6 +4905,10 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) flags_complete: changed_flags = orig_flags ^ new_flags; + is_reset_needed = !!(changed_flags & (I40E_FLAG_VEB_STATS_ENABLED | + 
I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED | + I40E_FLAG_DISABLE_FW_LLDP)); + /* Before we finalize any flag changes, we need to perform some * checks to ensure that the changes are supported and safe. */ @@ -4932,13 +4943,6 @@ flags_complete: return -EOPNOTSUPP; } - /* Now that we've checked to ensure that the new flags are valid, load - * them into place. Since we only modify flags either (a) during - * initialization or (b) while holding the RTNL lock, we don't need - * anything fancy here. - */ - pf->flags = new_flags; - /* Process any additional changes needed as a result of flag changes. * The changed_flags value reflects the list of bits that were * changed in the code above. @@ -4946,7 +4950,7 @@ flags_complete: /* Flush current ATR settings if ATR was disabled */ if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) && - !(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) { + !(new_flags & I40E_FLAG_FD_ATR_ENABLED)) { set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); } @@ -4955,7 +4959,7 @@ flags_complete: u16 sw_flags = 0, valid_flags = 0; int ret; - if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) + if (!(new_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags, @@ -4974,13 +4978,13 @@ flags_complete: (changed_flags & I40E_FLAG_BASE_R_FEC)) { u8 fec_cfg = 0; - if (pf->flags & I40E_FLAG_RS_FEC && - pf->flags & I40E_FLAG_BASE_R_FEC) { + if (new_flags & I40E_FLAG_RS_FEC && + new_flags & I40E_FLAG_BASE_R_FEC) { fec_cfg = I40E_AQ_SET_FEC_AUTO; - } else if (pf->flags & I40E_FLAG_RS_FEC) { + } else if (new_flags & I40E_FLAG_RS_FEC) { fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS | I40E_AQ_SET_FEC_ABILITY_RS); - } else if (pf->flags & I40E_FLAG_BASE_R_FEC) { + } else if (new_flags & I40E_FLAG_BASE_R_FEC) { fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR | I40E_AQ_SET_FEC_ABILITY_KR); } @@ -4988,14 +4992,14 @@ flags_complete: dev_warn(&pf->pdev->dev, "Cannot change FEC config\n"); } - if ((changed_flags & pf->flags & + if ((changed_flags & new_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) && - (pf->flags & I40E_FLAG_MFP_ENABLED)) + (new_flags & I40E_FLAG_MFP_ENABLED)) dev_warn(&pf->pdev->dev, "Turning on link-down-on-close flag may affect other partitions\n"); if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) { - if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) { + if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) { struct i40e_dcbx_config *dcbcfg; i40e_aq_stop_lldp(&pf->hw, true, false, NULL); @@ -5013,17 +5017,43 @@ flags_complete: dcbcfg->pfc.willing = 1; dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; } else { - i40e_aq_start_lldp(&pf->hw, false, NULL); + status = i40e_aq_start_lldp(&pf->hw, false, NULL); + if (status) { + adq_err = pf->hw.aq.asq_last_status; + switch (adq_err) { + case I40E_AQ_RC_EEXIST: + dev_warn(&pf->pdev->dev, + "FW LLDP agent is already running\n"); + is_reset_needed = false; + break; + case I40E_AQ_RC_EPERM: + dev_warn(&pf->pdev->dev, + "Device configuration forbids SW from starting the LLDP agent.\n"); + return -EINVAL; + default: + dev_warn(&pf->pdev->dev, + "Starting FW LLDP agent failed: error: %s, %s\n", + i40e_stat_str(&pf->hw, + status), + i40e_aq_str(&pf->hw, + adq_err)); + return -EINVAL; + } + } } } + /* Now that we've checked to ensure that the new flags are valid, load + * them into place. 
Since we only modify flags either (a) during + * initialization or (b) while holding the RTNL lock, we don't need + * anything fancy here. + */ + pf->flags = new_flags; + /* Issue reset to cause things to take effect, as additional bits * are added we will need to create a mask of bits requiring reset */ - if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED | - I40E_FLAG_LEGACY_RX | - I40E_FLAG_SOURCE_PRUNING_DISABLED | - I40E_FLAG_DISABLE_FW_LLDP)) + if (is_reset_needed) i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true); return 0; @@ -5181,6 +5211,16 @@ static int i40e_get_module_eeprom(struct net_device *netdev, return 0; } +static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + return -EOPNOTSUPP; +} + +static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + return -EOPNOTSUPP; +} + static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = { .set_eeprom = i40e_set_eeprom, .get_eeprom_len = i40e_get_eeprom_len, @@ -5208,6 +5248,8 @@ static const struct ethtool_ops i40e_ethtool_ops = { .set_rxnfc = i40e_set_rxnfc, .self_test = i40e_diag_test, .get_strings = i40e_get_strings, + .get_eee = i40e_get_eee, + .set_eee = i40e_set_eee, .set_phys_id = i40e_set_phys_id, .get_sset_count = i40e_get_sset_count, .get_ethtool_stats = i40e_get_ethtool_stats, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 320562b39686..9ebbe3da61bb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -32,7 +32,7 @@ static const char i40e_driver_string[] = __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN const char i40e_driver_version_str[] = DRV_VERSION; -static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; +static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation."; /* a bit of forward declarations */ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi); @@ -636,9 +636,6 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi) i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx), vsi->stat_offsets_loaded, &oes->rx_unknown_protocol, &es->rx_unknown_protocol); - i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx), - vsi->stat_offsets_loaded, - &oes->tx_errors, &es->tx_errors); i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx), I40E_GLV_GORCL(stat_idx), @@ -5864,8 +5861,10 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid, return -ENOENT; } - /* Success, update channel */ - ch->enabled_tc = enabled_tc; + /* Success, update channel, set enabled_tc only if the channel + * is not a macvlan + */ + ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc; ch->seid = ctxt.seid; ch->vsi_number = ctxt.vsi_number; ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx); @@ -6413,6 +6412,50 @@ static int i40e_resume_port_tx(struct i40e_pf *pf) } /** + * i40e_update_dcb_config + * @hw: pointer to the HW struct + * @enable_mib_change: enable MIB change event + * + * Update DCB configuration from the firmware + **/ +static enum i40e_status_code +i40e_update_dcb_config(struct i40e_hw *hw, bool enable_mib_change) +{ + struct i40e_lldp_variables lldp_cfg; + i40e_status ret; + + if (!hw->func_caps.dcb) + return I40E_NOT_SUPPORTED; + + /* Read LLDP NVM area */ + ret = i40e_read_lldp_cfg(hw, &lldp_cfg); + if (ret) + return I40E_ERR_NOT_READY; + + /* Get DCBX status */ + ret = i40e_get_dcbx_status(hw, &hw->dcbx_status); + if (ret) + return ret; + + /* Check the DCBX 
Status */ + if (hw->dcbx_status == I40E_DCBX_STATUS_DONE || + hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) { + /* Get current DCBX configuration */ + ret = i40e_get_dcb_config(hw); + if (ret) + return ret; + } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { + return I40E_ERR_NOT_READY; + } + + /* Configure the LLDP MIB change event */ + if (enable_mib_change) + ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL); + + return ret; +} + +/** * i40e_init_pf_dcb - Initialize DCB configuration * @pf: PF being configured * @@ -6428,11 +6471,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) * Also do not enable DCBx if FW LLDP agent is disabled */ if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) || - (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) + (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) { + dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n"); + err = I40E_NOT_SUPPORTED; goto out; + } - /* Get the initial DCB configuration */ - err = i40e_init_dcb(hw, true); + err = i40e_update_dcb_config(hw, true); if (!err) { /* Device/Function is not DCBX capable */ if ((!hw->func_caps.dcb) || @@ -6869,6 +6914,489 @@ static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi) } /** + * i40e_del_macvlan_filter + * @hw: pointer to the HW structure + * @seid: seid of the channel VSI + * @macaddr: the mac address to apply as a filter + * @aq_err: store the admin Q error + * + * This function deletes a mac filter on the channel VSI which serves as the + * macvlan. Returns 0 on success. + **/ +static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid, + const u8 *macaddr, int *aq_err) +{ + struct i40e_aqc_remove_macvlan_element_data element; + i40e_status status; + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, macaddr); + element.vlan_tag = 0; + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL); + *aq_err = hw->aq.asq_last_status; + + return status; +} + +/** + * i40e_add_macvlan_filter + * @hw: pointer to the HW structure + * @seid: seid of the channel VSI + * @macaddr: the mac address to apply as a filter + * @aq_err: store the admin Q error + * + * This function adds a mac filter on the channel VSI which serves as the + * macvlan. Returns 0 on success. 
+ **/ +static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid, + const u8 *macaddr, int *aq_err) +{ + struct i40e_aqc_add_macvlan_element_data element; + i40e_status status; + u16 cmd_flags = 0; + + ether_addr_copy(element.mac_addr, macaddr); + element.vlan_tag = 0; + element.queue_number = 0; + element.match_method = I40E_AQC_MM_ERR_NO_RES; + cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; + element.flags = cpu_to_le16(cmd_flags); + status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL); + *aq_err = hw->aq.asq_last_status; + + return status; +} + +/** + * i40e_reset_ch_rings - Reset the queue contexts in a channel + * @vsi: the VSI we want to access + * @ch: the channel we want to access + */ +static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch) +{ + struct i40e_ring *tx_ring, *rx_ring; + u16 pf_q; + int i; + + for (i = 0; i < ch->num_queue_pairs; i++) { + pf_q = ch->base_queue + i; + tx_ring = vsi->tx_rings[pf_q]; + tx_ring->ch = NULL; + rx_ring = vsi->rx_rings[pf_q]; + rx_ring->ch = NULL; + } +} + +/** + * i40e_free_macvlan_channels + * @vsi: the VSI we want to access + * + * This function frees the Qs of the channel VSI from + * the stack and also deletes the channel VSIs which + * serve as macvlans. + */ +static void i40e_free_macvlan_channels(struct i40e_vsi *vsi) +{ + struct i40e_channel *ch, *ch_tmp; + int ret; + + if (list_empty(&vsi->macvlan_list)) + return; + + list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { + struct i40e_vsi *parent_vsi; + + if (i40e_is_channel_macvlan(ch)) { + i40e_reset_ch_rings(vsi, ch); + clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); + netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev); + netdev_set_sb_channel(ch->fwd->netdev, 0); + kfree(ch->fwd); + ch->fwd = NULL; + } + + list_del(&ch->list); + parent_vsi = ch->parent_vsi; + if (!parent_vsi || !ch->initialized) { + kfree(ch); + continue; + } + + /* remove the VSI */ + ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, + NULL); + if (ret) + dev_err(&vsi->back->pdev->dev, + "unable to remove channel (%d) for parent VSI(%d)\n", + ch->seid, parent_vsi->seid); + kfree(ch); + } + vsi->macvlan_cnt = 0; +} + +/** + * i40e_fwd_ring_up - bring the macvlan device up + * @vsi: the VSI we want to access + * @vdev: macvlan netdevice + * @fwd: the private fwd structure + */ +static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev, + struct i40e_fwd_adapter *fwd) +{ + int ret = 0, num_tc = 1, i, aq_err; + struct i40e_channel *ch, *ch_tmp; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + + if (list_empty(&vsi->macvlan_list)) + return -EINVAL; + + /* Go through the list and find an available channel */ + list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { + if (!i40e_is_channel_macvlan(ch)) { + ch->fwd = fwd; + /* record configuration for macvlan interface in vdev */ + for (i = 0; i < num_tc; i++) + netdev_bind_sb_channel_queue(vsi->netdev, vdev, + i, + ch->num_queue_pairs, + ch->base_queue); + for (i = 0; i < ch->num_queue_pairs; i++) { + struct i40e_ring *tx_ring, *rx_ring; + u16 pf_q; + + pf_q = ch->base_queue + i; + + /* Get to TX ring ptr */ + tx_ring = vsi->tx_rings[pf_q]; + tx_ring->ch = ch; + + /* Get the RX ring ptr */ + rx_ring = vsi->rx_rings[pf_q]; + rx_ring->ch = ch; + } + break; + } + } + + /* Guarantee all rings are updated before we update the + * MAC address filter. 
+ */ + wmb(); + + /* Add a mac filter */ + ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err); + if (ret) { + /* if we cannot add the MAC rule then disable the offload */ + macvlan_release_l2fw_offload(vdev); + for (i = 0; i < ch->num_queue_pairs; i++) { + struct i40e_ring *rx_ring; + u16 pf_q; + + pf_q = ch->base_queue + i; + rx_ring = vsi->rx_rings[pf_q]; + rx_ring->netdev = NULL; + } + dev_info(&pf->pdev->dev, + "Error adding mac filter on macvlan err %s, aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, aq_err)); + netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n"); + } + + return ret; +} + +/** + * i40e_setup_macvlans - create the channels which will be macvlans + * @vsi: the VSI we want to access + * @macvlan_cnt: no. of macvlans to be setup + * @qcnt: no. of Qs per macvlan + * @vdev: macvlan netdevice + */ +static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt, + struct net_device *vdev) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_vsi_context ctxt; + u16 sections, qmap, num_qps; + struct i40e_channel *ch; + int i, pow, ret = 0; + u8 offset = 0; + + if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt) + return -EINVAL; + + num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt); + + /* find the next higher power-of-2 of num queue pairs */ + pow = fls(roundup_pow_of_two(num_qps) - 1); + + qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | + (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); + + /* Setup context bits for the main VSI */ + sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; + sections |= I40E_AQ_VSI_PROP_SCHED_VALID; + memset(&ctxt, 0, sizeof(ctxt)); + ctxt.seid = vsi->seid; + ctxt.pf_num = vsi->back->hw.pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.info = vsi->info; + ctxt.info.tc_mapping[0] = cpu_to_le16(qmap); + ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); + ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); + ctxt.info.valid_sections |= cpu_to_le16(sections); + + /* Reconfigure RSS for main VSI with new max queue count */ + vsi->rss_size = max_t(u16, num_qps, qcnt); + ret = i40e_vsi_config_rss(vsi); + if (ret) { + dev_info(&pf->pdev->dev, + "Failed to reconfig RSS for num_queues (%u)\n", + vsi->rss_size); + return ret; + } + vsi->reconfig_rss = true; + dev_dbg(&vsi->back->pdev->dev, + "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size); + vsi->next_base_queue = num_qps; + vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps; + + /* Update the VSI after updating the VSI queue-mapping + * information + */ + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Update vsi tc config failed, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + /* update the local VSI info with updated queue map */ + i40e_vsi_update_queue_map(vsi, &ctxt); + vsi->info.valid_sections = 0; + + /* Create channels for macvlans */ + INIT_LIST_HEAD(&vsi->macvlan_list); + for (i = 0; i < macvlan_cnt; i++) { + ch = kzalloc(sizeof(*ch), GFP_KERNEL); + if (!ch) { + ret = -ENOMEM; + goto err_free; + } + INIT_LIST_HEAD(&ch->list); + ch->num_queue_pairs = qcnt; + if (!i40e_setup_channel(pf, vsi, ch)) { + ret = -EINVAL; + goto err_free; + } + ch->parent_vsi = vsi; + vsi->cnt_q_avail -= ch->num_queue_pairs; + vsi->macvlan_cnt++; + list_add_tail(&ch->list, &vsi->macvlan_list); + } + + return ret; + +err_free: + dev_info(&pf->pdev->dev, "Failed to setup macvlans\n"); + 
i40e_free_macvlan_channels(vsi); + + return ret; +} + +/** + * i40e_fwd_add - configure macvlans + * @netdev: net device to configure + * @vdev: macvlan netdevice + **/ +static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors; + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_fwd_adapter *fwd; + int avail_macvlan, ret; + + if ((pf->flags & I40E_FLAG_DCB_ENABLED)) { + netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n"); + return ERR_PTR(-EINVAL); + } + if ((pf->flags & I40E_FLAG_TC_MQPRIO)) { + netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n"); + return ERR_PTR(-EINVAL); + } + if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) { + netdev_info(netdev, "Not enough vectors available to support macvlans\n"); + return ERR_PTR(-EINVAL); + } + + /* The macvlan device has to be a single Q device so that the + * tc_to_txq field can be reused to pick the tx queue. + */ + if (netif_is_multiqueue(vdev)) + return ERR_PTR(-ERANGE); + + if (!vsi->macvlan_cnt) { + /* reserve bit 0 for the pf device */ + set_bit(0, vsi->fwd_bitmask); + + /* Try to reserve as many queues as possible for macvlans. First + * reserve 3/4th of max vectors, then half, then quarter and + * calculate Qs per macvlan as you go + */ + vectors = pf->num_lan_msix; + if (vectors <= I40E_MAX_MACVLANS && vectors > 64) { + /* allocate 4 Qs per macvlan and 32 Qs to the PF*/ + q_per_macvlan = 4; + macvlan_cnt = (vectors - 32) / 4; + } else if (vectors <= 64 && vectors > 32) { + /* allocate 2 Qs per macvlan and 16 Qs to the PF*/ + q_per_macvlan = 2; + macvlan_cnt = (vectors - 16) / 2; + } else if (vectors <= 32 && vectors > 16) { + /* allocate 1 Q per macvlan and 16 Qs to the PF*/ + q_per_macvlan = 1; + macvlan_cnt = vectors - 16; + } else if (vectors <= 16 && vectors > 8) { + /* allocate 1 Q per macvlan and 8 Qs to the PF */ + q_per_macvlan = 1; + macvlan_cnt = vectors - 8; + } else { + /* allocate 1 Q per macvlan and 1 Q to the PF */ + q_per_macvlan = 1; + macvlan_cnt = vectors - 1; + } + + if (macvlan_cnt == 0) + return ERR_PTR(-EBUSY); + + /* Quiesce VSI queues */ + i40e_quiesce_vsi(vsi); + + /* sets up the macvlans but does not "enable" them */ + ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan, + vdev); + if (ret) + return ERR_PTR(ret); + + /* Unquiesce VSI */ + i40e_unquiesce_vsi(vsi); + } + avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask, + vsi->macvlan_cnt); + if (avail_macvlan >= I40E_MAX_MACVLANS) + return ERR_PTR(-EBUSY); + + /* create the fwd struct */ + fwd = kzalloc(sizeof(*fwd), GFP_KERNEL); + if (!fwd) + return ERR_PTR(-ENOMEM); + + set_bit(avail_macvlan, vsi->fwd_bitmask); + fwd->bit_no = avail_macvlan; + netdev_set_sb_channel(vdev, avail_macvlan); + fwd->netdev = vdev; + + if (!netif_running(netdev)) + return fwd; + + /* Set fwd ring up */ + ret = i40e_fwd_ring_up(vsi, vdev, fwd); + if (ret) { + /* unbind the queues and drop the subordinate channel config */ + netdev_unbind_sb_channel(netdev, vdev); + netdev_set_sb_channel(vdev, 0); + + kfree(fwd); + return ERR_PTR(-EINVAL); + } + + return fwd; +} + +/** + * i40e_del_all_macvlans - Delete all the mac filters on the channels + * @vsi: the VSI we want to access + */ +static void i40e_del_all_macvlans(struct i40e_vsi *vsi) +{ + struct i40e_channel *ch, *ch_tmp; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int aq_err, ret = 0; + + if 
(list_empty(&vsi->macvlan_list)) + return; + + list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { + if (i40e_is_channel_macvlan(ch)) { + ret = i40e_del_macvlan_filter(hw, ch->seid, + i40e_channel_mac(ch), + &aq_err); + if (!ret) { + /* Reset queue contexts */ + i40e_reset_ch_rings(vsi, ch); + clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); + netdev_unbind_sb_channel(vsi->netdev, + ch->fwd->netdev); + netdev_set_sb_channel(ch->fwd->netdev, 0); + kfree(ch->fwd); + ch->fwd = NULL; + } + } + } +} + +/** + * i40e_fwd_del - delete macvlan interfaces + * @netdev: net device to configure + * @vdev: macvlan netdevice + */ +static void i40e_fwd_del(struct net_device *netdev, void *vdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_fwd_adapter *fwd = vdev; + struct i40e_channel *ch, *ch_tmp; + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int aq_err, ret = 0; + + /* Find the channel associated with the macvlan and del mac filter */ + list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { + if (i40e_is_channel_macvlan(ch) && + ether_addr_equal(i40e_channel_mac(ch), + fwd->netdev->dev_addr)) { + ret = i40e_del_macvlan_filter(hw, ch->seid, + i40e_channel_mac(ch), + &aq_err); + if (!ret) { + /* Reset queue contexts */ + i40e_reset_ch_rings(vsi, ch); + clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); + netdev_unbind_sb_channel(netdev, fwd->netdev); + netdev_set_sb_channel(fwd->netdev, 0); + kfree(ch->fwd); + ch->fwd = NULL; + } else { + dev_info(&pf->pdev->dev, + "Error deleting mac filter on macvlan err %s, aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, aq_err)); + } + break; + } + } +} + +/** * i40e_setup_tc - configure multiple traffic classes * @netdev: net device to configure * @type_data: tc offload data @@ -6963,6 +7491,10 @@ config_tc: vsi->seid); need_reset = true; goto exit; + } else { + dev_info(&vsi->back->pdev->dev, + "Setup channel (id:%u) utilizing num_queues %d\n", + vsi->seid, vsi->tc_config.tc_info[0].qcount); } if (pf->flags & I40E_FLAG_TC_MQPRIO) { @@ -7227,15 +7759,15 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, /** * i40e_parse_cls_flower - Parse tc flower filters provided by kernel * @vsi: Pointer to VSI - * @cls_flower: Pointer to struct tc_cls_flower_offload + * @cls_flower: Pointer to struct flow_cls_offload * @filter: Pointer to cloud filter structure * **/ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, - struct tc_cls_flower_offload *f, + struct flow_cls_offload *f, struct i40e_cloud_filter *filter) { - struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_dissector *dissector = rule->match.dissector; u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0; struct i40e_pf *pf = vsi->back; @@ -7469,11 +8001,11 @@ static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc, /** * i40e_configure_clsflower - Configure tc flower filters * @vsi: Pointer to VSI - * @cls_flower: Pointer to struct tc_cls_flower_offload + * @cls_flower: Pointer to struct flow_cls_offload * **/ static int i40e_configure_clsflower(struct i40e_vsi *vsi, - struct tc_cls_flower_offload *cls_flower) + struct flow_cls_offload *cls_flower) { int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid); struct i40e_cloud_filter *filter = NULL; @@ -7565,11 +8097,11 @@ static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi, /** * i40e_delete_clsflower - Remove tc flower filters * @vsi: 
Pointer to VSI - * @cls_flower: Pointer to struct tc_cls_flower_offload + * @cls_flower: Pointer to struct flow_cls_offload * **/ static int i40e_delete_clsflower(struct i40e_vsi *vsi, - struct tc_cls_flower_offload *cls_flower) + struct flow_cls_offload *cls_flower) { struct i40e_cloud_filter *filter = NULL; struct i40e_pf *pf = vsi->back; @@ -7612,16 +8144,16 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi, * @type_data: offload data **/ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np, - struct tc_cls_flower_offload *cls_flower) + struct flow_cls_offload *cls_flower) { struct i40e_vsi *vsi = np->vsi; switch (cls_flower->command) { - case TC_CLSFLOWER_REPLACE: + case FLOW_CLS_REPLACE: return i40e_configure_clsflower(vsi, cls_flower); - case TC_CLSFLOWER_DESTROY: + case FLOW_CLS_DESTROY: return i40e_delete_clsflower(vsi, cls_flower); - case TC_CLSFLOWER_STATS: + case FLOW_CLS_STATS: return -EOPNOTSUPP; default: return -EOPNOTSUPP; @@ -7645,34 +8177,21 @@ static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } } -static int i40e_setup_tc_block(struct net_device *dev, - struct tc_block_offload *f) -{ - struct i40e_netdev_priv *np = netdev_priv(dev); - - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb, - np, np, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np); - return 0; - default: - return -EOPNOTSUPP; - } -} +static LIST_HEAD(i40e_block_cb_list); static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) { + struct i40e_netdev_priv *np = netdev_priv(netdev); + switch (type) { case TC_SETUP_QDISC_MQPRIO: return i40e_setup_tc(netdev, type_data); case TC_SETUP_BLOCK: - return i40e_setup_tc_block(netdev, type_data); + return flow_block_cb_setup_simple(type_data, + &i40e_block_cb_list, + i40e_setup_tc_block_cb, + np, np, true); default: return -EOPNOTSUPP; } @@ -8570,7 +9089,7 @@ static void i40e_link_event(struct i40e_pf *pf) /* Notify the base of the switch tree connected to * the link. Floating VEBs are not notified. 
*/ - if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) + if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); else i40e_vsi_link_event(vsi, new_link); @@ -10031,8 +10550,12 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) switch (vsi->type) { case I40E_VSI_MAIN: vsi->alloc_queue_pairs = pf->num_lan_qps; - vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, - I40E_REQ_DESCRIPTOR_MULTIPLE); + if (!vsi->num_tx_desc) + vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, + I40E_REQ_DESCRIPTOR_MULTIPLE); + if (!vsi->num_rx_desc) + vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, + I40E_REQ_DESCRIPTOR_MULTIPLE); if (pf->flags & I40E_FLAG_MSIX_ENABLED) vsi->num_q_vectors = pf->num_lan_msix; else @@ -10042,22 +10565,32 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) case I40E_VSI_FDIR: vsi->alloc_queue_pairs = 1; - vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, - I40E_REQ_DESCRIPTOR_MULTIPLE); + vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT, + I40E_REQ_DESCRIPTOR_MULTIPLE); + vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT, + I40E_REQ_DESCRIPTOR_MULTIPLE); vsi->num_q_vectors = pf->num_fdsb_msix; break; case I40E_VSI_VMDQ2: vsi->alloc_queue_pairs = pf->num_vmdq_qps; - vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, - I40E_REQ_DESCRIPTOR_MULTIPLE); + if (!vsi->num_tx_desc) + vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, + I40E_REQ_DESCRIPTOR_MULTIPLE); + if (!vsi->num_rx_desc) + vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, + I40E_REQ_DESCRIPTOR_MULTIPLE); vsi->num_q_vectors = pf->num_vmdq_msix; break; case I40E_VSI_SRIOV: vsi->alloc_queue_pairs = pf->num_vf_qps; - vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, - I40E_REQ_DESCRIPTOR_MULTIPLE); + if (!vsi->num_tx_desc) + vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, + I40E_REQ_DESCRIPTOR_MULTIPLE); + if (!vsi->num_rx_desc) + vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, + I40E_REQ_DESCRIPTOR_MULTIPLE); break; default: @@ -10333,7 +10866,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->vsi = vsi; ring->netdev = vsi->netdev; ring->dev = &pf->pdev->dev; - ring->count = vsi->num_desc; + ring->count = vsi->num_tx_desc; ring->size = 0; ring->dcb_tc = 0; if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) @@ -10350,7 +10883,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->vsi = vsi; ring->netdev = NULL; ring->dev = &pf->pdev->dev; - ring->count = vsi->num_desc; + ring->count = vsi->num_tx_desc; ring->size = 0; ring->dcb_tc = 0; if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) @@ -10366,7 +10899,7 @@ setup_rx: ring->vsi = vsi; ring->netdev = vsi->netdev; ring->dev = &pf->pdev->dev; - ring->count = vsi->num_desc; + ring->count = vsi->num_rx_desc; ring->size = 0; ring->dcb_tc = 0; ring->itr_setting = pf->rx_itr_default; @@ -11604,6 +12137,9 @@ static int i40e_set_features(struct net_device *netdev, return -EINVAL; } + if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt) + i40e_del_all_macvlans(vsi); + need_reset = i40e_set_ntuple(pf, features); if (need_reset) @@ -12348,6 +12884,8 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_bpf = i40e_xdp, .ndo_xdp_xmit = i40e_xdp_xmit, .ndo_xsk_async_xmit = i40e_xsk_async_xmit, + .ndo_dfwd_add_station = i40e_fwd_add, + .ndo_dfwd_del_station = i40e_fwd_del, }; /** @@ -12407,6 +12945,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) /* record features VLANs can make use of */ netdev->vlan_features |= hw_enc_features | 
NETIF_F_TSO_MANGLEID; + /* enable macvlan offloads */ + netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; + hw_features = hw_enc_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; @@ -12519,7 +13060,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) struct i40e_pf *pf = vsi->back; /* Uplink is not a bridge so default to VEB */ - if (vsi->veb_idx == I40E_NO_VEB) + if (vsi->veb_idx >= I40E_MAX_VEB) return 1; veb = pf->veb[vsi->veb_idx]; @@ -13577,7 +14118,7 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, /* Main VEB? */ if (uplink_seid != pf->mac_seid) break; - if (pf->lan_veb == I40E_NO_VEB) { + if (pf->lan_veb >= I40E_MAX_VEB) { int v; /* find existing or else empty VEB */ @@ -13587,13 +14128,15 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, break; } } - if (pf->lan_veb == I40E_NO_VEB) { + if (pf->lan_veb >= I40E_MAX_VEB) { v = i40e_veb_mem_alloc(pf); if (v < 0) break; pf->lan_veb = v; } } + if (pf->lan_veb >= I40E_MAX_VEB) + break; pf->veb[pf->lan_veb]->seid = seid; pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; @@ -13747,7 +14290,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) /* Set up the PF VSI associated with the PF's main VSI * that is already in the HW switch */ - if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) + if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) uplink_seid = pf->veb[pf->lan_veb]->seid; else uplink_seid = pf->mac_seid; @@ -14203,7 +14746,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), I40E_MAX_CSR_SPACE); - + /* We believe that the highest register to read is + * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size + * is not less than that before mapping to prevent a + * kernel panic. + */ + if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) { + dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n", + pf->ioremap_len); + err = -ENOMEM; + goto err_ioremap; + } hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len); if (!hw->hw_addr) { err = -EIO; @@ -14388,6 +14941,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, pf); pci_save_state(pdev); + dev_info(&pdev->dev, + (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ? 
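
The NETIF_F_HW_L2FW_DOFFLOAD bit and the ndo_dfwd_add_station/ndo_dfwd_del_station hooks added above let macvlan upper devices be forwarded in hardware; the i40e_set_features() hunk tears all macvlan channels down again when the feature bit is cleared. The wiring, sketched with hypothetical foo_* helpers in place of i40e_fwd_add()/i40e_fwd_del():

#include <linux/netdevice.h>

/* Hypothetical channel-management helpers. */
static void *foo_alloc_macvlan_channel(struct net_device *pdev,
				       struct net_device *vdev);
static void foo_free_macvlan_channel(struct net_device *pdev, void *priv);

/* Return a driver-private handle for the offloaded macvlan @vdev, or an
 * ERR_PTR to reject the offload.
 */
static void *foo_fwd_add(struct net_device *pdev, struct net_device *vdev)
{
	return foo_alloc_macvlan_channel(pdev, vdev);
}

static void foo_fwd_del(struct net_device *pdev, void *fwd_priv)
{
	foo_free_macvlan_channel(pdev, fwd_priv);
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_dfwd_add_station	= foo_fwd_add,
	.ndo_dfwd_del_station	= foo_fwd_del,
};

/* At netdev setup time the capability is advertised with:
 *	netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
 */
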
+ "FW LLDP is disabled\n" : + "FW LLDP is enabled\n"); + /* Enable FW to write default DCB config on link-up */ i40e_aq_set_dcb_parameters(hw, true, NULL); diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 882627073dce..eac88bcc6c06 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -350,6 +350,10 @@ i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed) return VIRTCHNL_LINK_SPEED_100MB; case I40E_LINK_SPEED_1GB: return VIRTCHNL_LINK_SPEED_1GB; + case I40E_LINK_SPEED_2_5GB: + return VIRTCHNL_LINK_SPEED_2_5GB; + case I40E_LINK_SPEED_5GB: + return VIRTCHNL_LINK_SPEED_5GB; case I40E_LINK_SPEED_10GB: return VIRTCHNL_LINK_SPEED_10GB; case I40E_LINK_SPEED_40GB: diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 439c35f0c581..11394a52e21c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -140,8 +140,7 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) * @ptp: The PTP clock structure * @delta: Offset in nanoseconds to adjust the PHC time by * - * Adjust the frequency of the PHC by the indicated parts per billion from the - * base frequency. + * Adjust the current clock time by a delta specified in nanoseconds. **/ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 20a283702c9f..2a2fe3ec7926 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -774,7 +774,7 @@ void i40e_detect_recover_hung(struct i40e_vsi *vsi) static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring, int napi_budget) { - u16 i = tx_ring->next_to_clean; + int i = tx_ring->next_to_clean; struct i40e_tx_buffer *tx_buf; struct i40e_tx_desc *tx_head; struct i40e_tx_desc *tx_desc; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 479bc60c8f71..02b09a8ad54c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -440,7 +440,7 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, struct virtchnl_iwarp_qv_info *qv_info; u32 v_idx, i, reg_idx, reg; u32 next_q_idx, next_q_type; - u32 msix_vf, size; + u32 msix_vf; int ret = 0; msix_vf = pf->hw.func_caps.num_msix_vectors_vf; @@ -454,11 +454,10 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, goto err_out; } - size = sizeof(struct virtchnl_iwarp_qvlist_info) + - (sizeof(struct virtchnl_iwarp_qv_info) * - (qvlist_info->num_vectors - 1)); kfree(vf->qvlist_info); - vf->qvlist_info = kzalloc(size, GFP_KERNEL); + vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info, + qvlist_info->num_vectors - 1), + GFP_KERNEL); if (!vf->qvlist_info) { ret = -ENOMEM; goto err_out; @@ -470,14 +469,15 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, qv_info = &qvlist_info->qv_info[i]; if (!qv_info) continue; - v_idx = qv_info->v_idx; /* Validate vector id belongs to this vf */ - if (!i40e_vc_isvalid_vector_id(vf, v_idx)) { + if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) { ret = -EINVAL; goto err_free; } + v_idx = qv_info->v_idx; + vf->qvlist_info->qv_info[i] = *qv_info; reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1); @@ -1845,7 +1845,7 @@ static int 
i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; struct i40e_vsi *vsi; int num_vsis = 1; - int len = 0; + size_t len = 0; int ret; if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { @@ -1853,9 +1853,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) goto err; } - len = (sizeof(struct virtchnl_vf_resource) + - sizeof(struct virtchnl_vsi_resource) * num_vsis); - + len = struct_size(vfres, vsi_res, num_vsis); vfres = kzalloc(len, GFP_KERNEL); if (!vfres) { aq_ret = I40E_ERR_NO_MEMORY; @@ -2135,8 +2133,13 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) } } - if (vf->adq_enabled) + if (vf->adq_enabled) { + if (idx >= ARRAY_SIZE(vf->ch)) { + aq_ret = I40E_ERR_NO_AVAILABLE_VSI; + goto error_param; + } vsi_id = vf->ch[idx].vsi_id; + } if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, &qpi->rxq) || @@ -2152,6 +2155,10 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) * to its appropriate VSIs based on TC mapping **/ if (vf->adq_enabled) { + if (idx >= ARRAY_SIZE(vf->ch)) { + aq_ret = I40E_ERR_NO_AVAILABLE_VSI; + goto error_param; + } if (j == (vf->ch[idx].num_qps - 1)) { idx++; j = 0; /* resetting the queue count */ @@ -2318,7 +2325,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; struct i40e_pf *pf = vf->pf; - u16 vsi_id = vqs->vsi_id; i40e_status aq_ret = 0; int i; @@ -2327,7 +2333,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) goto error_param; } - if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2427,18 +2433,14 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) { struct virtchnl_vf_res_request *vfres = (struct virtchnl_vf_res_request *)msg; - int req_pairs = vfres->num_queue_pairs; - int cur_pairs = vf->num_queue_pairs; + u16 req_pairs = vfres->num_queue_pairs; + u8 cur_pairs = vf->num_queue_pairs; struct i40e_pf *pf = vf->pf; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) return -EINVAL; - if (req_pairs <= 0) { - dev_err(&pf->pdev->dev, - "VF %d tried to request %d queues. 
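
The ADQ hunks above bound-check idx against ARRAY_SIZE(vf->ch) before indexing, since the index is derived from a VF-controlled virtchnl message while the channel array is fixed-size. The defensive shape, with hypothetical types:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

struct foo_channel { u16 vsi_id; };
struct foo_vf { struct foo_channel ch[4]; };	/* fixed-size array */

static int foo_channel_vsi(struct foo_vf *vf, u32 idx, u16 *vsi_id)
{
	/* idx comes from an untrusted VF message; reject it before it
	 * can index past the channel array.
	 */
	if (idx >= ARRAY_SIZE(vf->ch))
		return -EINVAL;

	*vsi_id = vf->ch[idx].vsi_id;
	return 0;
}
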
Ignoring.\n", - vf->vf_id, req_pairs); - } else if (req_pairs > I40E_MAX_VF_QUEUES) { + if (req_pairs > I40E_MAX_VF_QUEUES) { dev_err(&pf->pdev->dev, "VF %d tried to request more than %d queues.\n", vf->vf_id, @@ -2509,7 +2511,7 @@ error_param: * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast */ #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1) -#define I40E_VC_MAX_VLAN_PER_VF 8 +#define I40E_VC_MAX_VLAN_PER_VF 16 /** * i40e_check_vf_permission @@ -2587,12 +2589,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_ether_addr_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - u16 vsi_id = al->vsi_id; i40e_status ret = 0; int i; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || - !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; } @@ -2657,12 +2658,11 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_ether_addr_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - u16 vsi_id = al->vsi_id; i40e_status ret = 0; int i; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || - !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; } @@ -2726,7 +2726,6 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_vlan_filter_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - u16 vsi_id = vfl->vsi_id; i40e_status aq_ret = 0; int i; @@ -2737,7 +2736,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) goto error_param; } if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || - !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2798,12 +2797,11 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_vlan_filter_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - u16 vsi_id = vfl->vsi_id; i40e_status aq_ret = 0; int i; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || - !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2920,11 +2918,10 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) (struct virtchnl_rss_key *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - u16 vsi_id = vrk->vsi_id; i40e_status aq_ret = 0; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || - !i40e_vc_isvalid_vsi_id(vf, vsi_id) || + !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { aq_ret = I40E_ERR_PARAM; goto err; @@ -2951,16 +2948,22 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) (struct virtchnl_rss_lut *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - u16 vsi_id = vrl->vsi_id; i40e_status aq_ret = 0; + u16 i; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || - !i40e_vc_isvalid_vsi_id(vf, vsi_id) || + !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) || (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { aq_ret = I40E_ERR_PARAM; goto err; } + for (i = 0; i < vrl->lut_entries; i++) + if (vrl->lut[i] >= vf->num_queue_pairs) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + vsi = pf->vsi[vf->lan_vsi_idx]; aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); /* send the response to the VF */ @@ -3041,14 +3044,15 @@ err: **/ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 
*msg) { - struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; i40e_status aq_ret = 0; + struct i40e_vsi *vsi; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto err; } + vsi = vf->pf->vsi[vf->lan_vsi_idx]; i40e_vlan_stripping_enable(vsi); /* send the response to the VF */ @@ -3066,14 +3070,15 @@ err: **/ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) { - struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; i40e_status aq_ret = 0; + struct i40e_vsi *vsi; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto err; } + vsi = vf->pf->vsi[vf->lan_vsi_idx]; i40e_vlan_stripping_disable(vsi); /* send the response to the VF */ @@ -3531,8 +3536,9 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_tc_info *)msg; struct i40e_pf *pf = vf->pf; struct i40e_link_status *ls = &pf->hw.phy.link_info; - int i, adq_request_qps = 0, speed = 0; + int i, adq_request_qps = 0; i40e_status aq_ret = 0; + u64 speed = 0; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; @@ -3558,8 +3564,8 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) /* max number of traffic classes for VF currently capped at 4 */ if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) { dev_err(&pf->pdev->dev, - "VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n", - vf->vf_id, tci->num_tc); + "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n", + vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI); aq_ret = I40E_ERR_PARAM; goto err; } @@ -3569,8 +3575,9 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) if (!tci->list[i].count || tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) { dev_err(&pf->pdev->dev, - "VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n", - vf->vf_id, i, tci->list[i].count); + "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n", + vf->vf_id, i, tci->list[i].count, + I40E_DEFAULT_QUEUES_PER_VF); aq_ret = I40E_ERR_PARAM; goto err; } @@ -3730,19 +3737,6 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, /* perform basic checks on the msg */ ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); - /* perform additional checks specific to this driver */ - if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) { - struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; - - if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) - ret = -EINVAL; - } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) { - struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; - - if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) - ret = -EINVAL; - } - if (ret) { i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", @@ -3943,6 +3937,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) int bkt; u8 i; + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + /* validate the request */ ret = i40e_validate_vf(pf, vf_id); if (ret) @@ -3967,11 +3966,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto error_param; } - if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { - dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); - return -EAGAIN; - } - if (is_multicast_ether_addr(mac)) { dev_err(&pf->pdev->dev, "Invalid Ethernet address %pM for VF 
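
In i40e_ndo_set_vf_mac() above, claiming __I40E_VIRTCHNL_OP_PENDING moves ahead of all validation, so every failure now exits through the common path that clears the bit rather than returning -EAGAIN from the middle of the function and skipping cleanup. The serialization shape, sketched with hypothetical names:

#include <linux/bitops.h>

enum { __FOO_VIRTCHNL_OP_PENDING = 0 };
struct foo_pf { unsigned long state[1]; };

static int foo_validate_and_apply(struct foo_pf *pf);	/* hypothetical */

static int foo_vf_ndo(struct foo_pf *pf)
{
	int ret;

	/* Claim the "VF op pending" bit before anything else; all later
	 * failures fall through to the common clear below.
	 */
	if (test_and_set_bit(__FOO_VIRTCHNL_OP_PENDING, pf->state))
		return -EAGAIN;

	ret = foo_validate_and_apply(pf);

	clear_bit(__FOO_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
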
%d\n", mac, vf_id); @@ -4302,10 +4296,8 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, vf = &pf->vf[vf_id]; /* first vsi is always the LAN vsi */ vsi = pf->vsi[vf->lan_vsi_idx]; - if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", - vf_id); - ret = -EAGAIN; + if (!vsi) { + ret = -ENOENT; goto error_param; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 1b17486543ac..32bad014d76c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -215,6 +215,7 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) break; default: bpf_warn_invalid_xdp_action(act); + /* fall through */ case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); /* fallthrough -- handle aborts by dropping packet */ @@ -640,8 +641,8 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) struct i40e_tx_desc *tx_desc = NULL; struct i40e_tx_buffer *tx_bi; bool work_done = true; + struct xdp_desc desc; dma_addr_t dma; - u32 len; while (budget-- > 0) { if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) { @@ -650,21 +651,23 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) break; } - if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len)) + if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc)) break; - dma_sync_single_for_device(xdp_ring->dev, dma, len, + dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr); + + dma_sync_single_for_device(xdp_ring->dev, dma, desc.len, DMA_BIDIRECTIONAL); tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use]; - tx_bi->bytecount = len; + tx_bi->bytecount = desc.len; tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use); tx_desc->buffer_addr = cpu_to_le64(dma); tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP, - 0, len, 0); + 0, desc.len, 0); xdp_ring->next_to_use++; if (xdp_ring->next_to_use == xdp_ring->count) diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile index 9cbb5743ed12..c997063ed728 100644 --- a/drivers/net/ethernet/intel/iavf/Makefile +++ b/drivers/net/ethernet/intel/iavf/Makefile @@ -12,4 +12,4 @@ subdir-ccflags-y += -I$(src) obj-$(CONFIG_IAVF) += iavf.o iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o \ - iavf_txrx.o iavf_common.o i40e_adminq.o iavf_client.o + iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h deleted file mode 100644 index e5ae4a1c0cff..000000000000 --- a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h +++ /dev/null @@ -1,530 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_ADMINQ_CMD_H_ -#define _I40E_ADMINQ_CMD_H_ - -/* This header file defines the i40e Admin Queue commands and is shared between - * i40e Firmware and Software. Do not change the names in this file to IAVF - * because this file should be diff-able against the i40e version, even - * though many parts have been removed in this VF version. - * - * This file needs to comply with the Linux Kernel coding style. - */ - -#define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR_X722 0x0005 -#define I40E_FW_API_VERSION_MINOR_X710 0x0008 - -#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? 
\ - I40E_FW_API_VERSION_MINOR_X710 : \ - I40E_FW_API_VERSION_MINOR_X722) - -/* API version 1.7 implements additional link and PHY-specific APIs */ -#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 - -struct i40e_aq_desc { - __le16 flags; - __le16 opcode; - __le16 datalen; - __le16 retval; - __le32 cookie_high; - __le32 cookie_low; - union { - struct { - __le32 param0; - __le32 param1; - __le32 param2; - __le32 param3; - } internal; - struct { - __le32 param0; - __le32 param1; - __le32 addr_high; - __le32 addr_low; - } external; - u8 raw[16]; - } params; -}; - -/* Flags sub-structure - * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | - * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | - */ - -/* command flags and offsets*/ -#define I40E_AQ_FLAG_DD_SHIFT 0 -#define I40E_AQ_FLAG_CMP_SHIFT 1 -#define I40E_AQ_FLAG_ERR_SHIFT 2 -#define I40E_AQ_FLAG_VFE_SHIFT 3 -#define I40E_AQ_FLAG_LB_SHIFT 9 -#define I40E_AQ_FLAG_RD_SHIFT 10 -#define I40E_AQ_FLAG_VFC_SHIFT 11 -#define I40E_AQ_FLAG_BUF_SHIFT 12 -#define I40E_AQ_FLAG_SI_SHIFT 13 -#define I40E_AQ_FLAG_EI_SHIFT 14 -#define I40E_AQ_FLAG_FE_SHIFT 15 - -#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ - -/* error codes */ -enum i40e_admin_queue_err { - I40E_AQ_RC_OK = 0, /* success */ - I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ - I40E_AQ_RC_ENOENT = 2, /* No such element */ - I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ - I40E_AQ_RC_EINTR = 4, /* operation interrupted */ - I40E_AQ_RC_EIO = 5, /* I/O error */ - I40E_AQ_RC_ENXIO = 6, /* No such resource */ - I40E_AQ_RC_E2BIG = 7, /* Arg too long */ - I40E_AQ_RC_EAGAIN = 8, /* Try again */ - I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ - I40E_AQ_RC_EACCES = 10, /* Permission denied */ - I40E_AQ_RC_EFAULT = 11, /* Bad address */ - I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ - I40E_AQ_RC_EEXIST = 13, /* object already exists */ - I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ - I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ - I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ - I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ - I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ - I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ - I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ - I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ - I40E_AQ_RC_EFBIG = 22, /* File too large */ -}; - -/* Admin Queue command opcodes */ -enum i40e_admin_queue_opc { - /* aq commands */ - i40e_aqc_opc_get_version = 0x0001, - i40e_aqc_opc_driver_version = 0x0002, - i40e_aqc_opc_queue_shutdown = 0x0003, - i40e_aqc_opc_set_pf_context = 0x0004, - - /* resource ownership */ - i40e_aqc_opc_request_resource = 0x0008, - i40e_aqc_opc_release_resource = 0x0009, - - i40e_aqc_opc_list_func_capabilities = 0x000A, - i40e_aqc_opc_list_dev_capabilities = 0x000B, - - /* Proxy commands */ - 
i40e_aqc_opc_set_proxy_config = 0x0104, - i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105, - - /* LAA */ - i40e_aqc_opc_mac_address_read = 0x0107, - i40e_aqc_opc_mac_address_write = 0x0108, - - /* PXE */ - i40e_aqc_opc_clear_pxe_mode = 0x0110, - - /* WoL commands */ - i40e_aqc_opc_set_wol_filter = 0x0120, - i40e_aqc_opc_get_wake_reason = 0x0121, - - /* internal switch commands */ - i40e_aqc_opc_get_switch_config = 0x0200, - i40e_aqc_opc_add_statistics = 0x0201, - i40e_aqc_opc_remove_statistics = 0x0202, - i40e_aqc_opc_set_port_parameters = 0x0203, - i40e_aqc_opc_get_switch_resource_alloc = 0x0204, - i40e_aqc_opc_set_switch_config = 0x0205, - i40e_aqc_opc_rx_ctl_reg_read = 0x0206, - i40e_aqc_opc_rx_ctl_reg_write = 0x0207, - - i40e_aqc_opc_add_vsi = 0x0210, - i40e_aqc_opc_update_vsi_parameters = 0x0211, - i40e_aqc_opc_get_vsi_parameters = 0x0212, - - i40e_aqc_opc_add_pv = 0x0220, - i40e_aqc_opc_update_pv_parameters = 0x0221, - i40e_aqc_opc_get_pv_parameters = 0x0222, - - i40e_aqc_opc_add_veb = 0x0230, - i40e_aqc_opc_update_veb_parameters = 0x0231, - i40e_aqc_opc_get_veb_parameters = 0x0232, - - i40e_aqc_opc_delete_element = 0x0243, - - i40e_aqc_opc_add_macvlan = 0x0250, - i40e_aqc_opc_remove_macvlan = 0x0251, - i40e_aqc_opc_add_vlan = 0x0252, - i40e_aqc_opc_remove_vlan = 0x0253, - i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, - i40e_aqc_opc_add_tag = 0x0255, - i40e_aqc_opc_remove_tag = 0x0256, - i40e_aqc_opc_add_multicast_etag = 0x0257, - i40e_aqc_opc_remove_multicast_etag = 0x0258, - i40e_aqc_opc_update_tag = 0x0259, - i40e_aqc_opc_add_control_packet_filter = 0x025A, - i40e_aqc_opc_remove_control_packet_filter = 0x025B, - i40e_aqc_opc_add_cloud_filters = 0x025C, - i40e_aqc_opc_remove_cloud_filters = 0x025D, - i40e_aqc_opc_clear_wol_switch_filters = 0x025E, - - i40e_aqc_opc_add_mirror_rule = 0x0260, - i40e_aqc_opc_delete_mirror_rule = 0x0261, - - /* Dynamic Device Personalization */ - i40e_aqc_opc_write_personalization_profile = 0x0270, - i40e_aqc_opc_get_personalization_profile_list = 0x0271, - - /* DCB commands */ - i40e_aqc_opc_dcb_ignore_pfc = 0x0301, - i40e_aqc_opc_dcb_updated = 0x0302, - i40e_aqc_opc_set_dcb_parameters = 0x0303, - - /* TX scheduler */ - i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, - i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, - i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, - i40e_aqc_opc_query_vsi_bw_config = 0x0408, - i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, - i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, - - i40e_aqc_opc_enable_switching_comp_ets = 0x0413, - i40e_aqc_opc_modify_switching_comp_ets = 0x0414, - i40e_aqc_opc_disable_switching_comp_ets = 0x0415, - i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, - i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, - i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, - i40e_aqc_opc_query_port_ets_config = 0x0419, - i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, - i40e_aqc_opc_suspend_port_tx = 0x041B, - i40e_aqc_opc_resume_port_tx = 0x041C, - i40e_aqc_opc_configure_partition_bw = 0x041D, - /* hmc */ - i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, - - /* phy commands*/ - i40e_aqc_opc_get_phy_abilities = 0x0600, - i40e_aqc_opc_set_phy_config = 0x0601, - i40e_aqc_opc_set_mac_config = 0x0603, - i40e_aqc_opc_set_link_restart_an = 0x0605, - i40e_aqc_opc_get_link_status = 0x0607, - i40e_aqc_opc_set_phy_int_mask = 0x0613, - i40e_aqc_opc_get_local_advt_reg = 0x0614, - i40e_aqc_opc_set_local_advt_reg = 0x0615, - 
i40e_aqc_opc_get_partner_advt = 0x0616, - i40e_aqc_opc_set_lb_modes = 0x0618, - i40e_aqc_opc_get_phy_wol_caps = 0x0621, - i40e_aqc_opc_set_phy_debug = 0x0622, - i40e_aqc_opc_upload_ext_phy_fm = 0x0625, - i40e_aqc_opc_run_phy_activity = 0x0626, - i40e_aqc_opc_set_phy_register = 0x0628, - i40e_aqc_opc_get_phy_register = 0x0629, - - /* NVM commands */ - i40e_aqc_opc_nvm_read = 0x0701, - i40e_aqc_opc_nvm_erase = 0x0702, - i40e_aqc_opc_nvm_update = 0x0703, - i40e_aqc_opc_nvm_config_read = 0x0704, - i40e_aqc_opc_nvm_config_write = 0x0705, - i40e_aqc_opc_oem_post_update = 0x0720, - i40e_aqc_opc_thermal_sensor = 0x0721, - - /* virtualization commands */ - i40e_aqc_opc_send_msg_to_pf = 0x0801, - i40e_aqc_opc_send_msg_to_vf = 0x0802, - i40e_aqc_opc_send_msg_to_peer = 0x0803, - - /* alternate structure */ - i40e_aqc_opc_alternate_write = 0x0900, - i40e_aqc_opc_alternate_write_indirect = 0x0901, - i40e_aqc_opc_alternate_read = 0x0902, - i40e_aqc_opc_alternate_read_indirect = 0x0903, - i40e_aqc_opc_alternate_write_done = 0x0904, - i40e_aqc_opc_alternate_set_mode = 0x0905, - i40e_aqc_opc_alternate_clear_port = 0x0906, - - /* LLDP commands */ - i40e_aqc_opc_lldp_get_mib = 0x0A00, - i40e_aqc_opc_lldp_update_mib = 0x0A01, - i40e_aqc_opc_lldp_add_tlv = 0x0A02, - i40e_aqc_opc_lldp_update_tlv = 0x0A03, - i40e_aqc_opc_lldp_delete_tlv = 0x0A04, - i40e_aqc_opc_lldp_stop = 0x0A05, - i40e_aqc_opc_lldp_start = 0x0A06, - - /* Tunnel commands */ - i40e_aqc_opc_add_udp_tunnel = 0x0B00, - i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_set_rss_key = 0x0B02, - i40e_aqc_opc_set_rss_lut = 0x0B03, - i40e_aqc_opc_get_rss_key = 0x0B04, - i40e_aqc_opc_get_rss_lut = 0x0B05, - - /* Async Events */ - i40e_aqc_opc_event_lan_overflow = 0x1001, - - /* OEM commands */ - i40e_aqc_opc_oem_parameter_change = 0xFE00, - i40e_aqc_opc_oem_device_status_change = 0xFE01, - i40e_aqc_opc_oem_ocsd_initialize = 0xFE02, - i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, - - /* debug commands */ - i40e_aqc_opc_debug_read_reg = 0xFF03, - i40e_aqc_opc_debug_write_reg = 0xFF04, - i40e_aqc_opc_debug_modify_reg = 0xFF07, - i40e_aqc_opc_debug_dump_internals = 0xFF08, -}; - -/* command structures and indirect data structures */ - -/* Structure naming conventions: - * - no suffix for direct command descriptor structures - * - _data for indirect sent data - * - _resp for indirect return data (data which is both will use _data) - * - _completion for direct return data - * - _element_ for repeated elements (may also be _data or _resp) - * - * Command structures are expected to overlay the params.raw member of the basic - * descriptor, and as such cannot exceed 16 bytes in length. - */ - -/* This macro is used to generate a compilation error if a structure - * is not exactly the correct length. It gives a divide by zero error if the - * structure is not of the correct size, otherwise it creates an enum that is - * never used. - */ -#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ - { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } - -/* This macro is used extensively to ensure that command structures are 16 - * bytes in length as they have to map to the raw array of that size. 
- */ -#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) - -/* Queue Shutdown (direct 0x0003) */ -struct i40e_aqc_queue_shutdown { - __le32 driver_unloading; -#define I40E_AQ_DRIVER_UNLOADING 0x1 - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); - -struct i40e_aqc_vsi_properties_data { - /* first 96 byte are written by SW */ - __le16 valid_sections; -#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 -#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 -#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 -#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 -#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 -#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 -#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 -#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 -#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 -#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 - /* switch section */ - __le16 switch_id; /* 12bit id combined with flags below */ -#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 -#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) -#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 -#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 -#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 - u8 sw_reserved[2]; - /* security section */ - u8 sec_flags; -#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 - u8 sec_reserved; - /* VLAN section */ - __le16 pvid; /* VLANS include priority bits */ - __le16 fcoe_pvid; - u8 port_vlan_flags; -#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 -#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ - I40E_AQ_VSI_PVLAN_MODE_SHIFT) -#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 -#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 -#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 -#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 -#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 -#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ - I40E_AQ_VSI_PVLAN_EMOD_SHIFT) -#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 -#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 -#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 -#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 - u8 pvlan_reserved[3]; - /* ingress egress up sections */ - __le32 ingress_table; /* bitmap, 3 bits per up */ -#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 -#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 -#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 -#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 -#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 -#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 -#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 -#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 -#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) - __le32 egress_table; /* same defines as for ingress table */ - /* cascaded PV section */ - __le16 cas_pv_tag; - u8 cas_pv_flags; -#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ - I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) -#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 
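
The I40E_CHECK_STRUCT_LEN macro in the removed header above is a pre-static_assert() idiom: when the size mismatches, the enum initializer divides by zero and the build fails. Restated generically (modern code would use static_assert() or BUILD_BUG_ON() instead):

#define CHECK_STRUCT_LEN(n, X) enum check_enum_##X \
	{ check_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }

struct demo_cmd { unsigned char raw[16]; };
CHECK_STRUCT_LEN(16, demo_cmd);	/* compiles; any other size breaks the build */
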
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 -#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 -#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 -#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 -#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 - u8 cas_pv_reserved; - /* queue mapping section */ - __le16 mapping_flags; -#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 -#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 - __le16 queue_mapping[16]; -#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 -#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) - __le16 tc_mapping[8]; -#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 -#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ - I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) -#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 -#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ - I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) - /* queueing option section */ - u8 queueing_opt_flags; -#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 -#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 -#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 -#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 -#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 -#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 - u8 queueing_opt_reserved[3]; - /* scheduler section */ - u8 up_enable_bits; - u8 sched_reserved; - /* outer up section */ - __le32 outer_up_table; /* same structure and defines as ingress tbl */ - u8 cmd_reserved[8]; - /* last 32 bytes are written by FW */ - __le16 qs_handle[8]; -#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF - __le16 stat_counter_idx; - __le16 sched_id; - u8 resp_reserved[12]; -}; - -I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); - -/* Get VEB Parameters (direct 0x0232) - * uses i40e_aqc_switch_seid for the descriptor - */ -struct i40e_aqc_get_veb_parameters_completion { - __le16 seid; - __le16 switch_id; - __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ - __le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; - u8 reserved[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); - -#define I40E_LINK_SPEED_100MB_SHIFT 0x1 -#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 -#define I40E_LINK_SPEED_10GB_SHIFT 0x3 -#define I40E_LINK_SPEED_40GB_SHIFT 0x4 -#define I40E_LINK_SPEED_20GB_SHIFT 0x5 -#define I40E_LINK_SPEED_25GB_SHIFT 0x6 - -enum i40e_aq_link_speed { - I40E_LINK_SPEED_UNKNOWN = 0, - I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT), - I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT), - I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT), - I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT), - I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT), - I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT), -}; - -/* Send to PF command (indirect 0x0801) id is only used by PF - * Send to VF command (indirect 0x0802) id is only used by PF - * Send to Peer PF command (indirect 0x0803) - */ -struct i40e_aqc_pf_vf_message { - __le32 id; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); - -struct i40e_aqc_get_set_rss_key { -#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15) -#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 -#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ - I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) - __le16 vsi_id; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); - -struct i40e_aqc_get_set_rss_key_data { - u8 standard_rss_key[0x28]; - u8 extended_hash_key[0xc]; -}; - -I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); - -struct i40e_aqc_get_set_rss_lut { 
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15) -#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 -#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ - I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) - __le16 vsi_id; -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \ - BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) - -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 - __le16 flags; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); -#endif /* _I40E_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 272d76b733aa..9fc635d816d2 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -109,7 +109,7 @@ struct iavf_q_vector { /* Helper macros to switch between ints/sec and what the register uses. * And yes, it's the same math going both ways. The lowest value - * supported by all of the i40e hardware is 8. + * supported by all of the iavf hardware is 8. */ #define EITR_INTS_PER_SEC_TO_REG(_eitr) \ ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) @@ -171,6 +171,7 @@ enum iavf_state_t { __IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ __IAVF_INIT_SW, /* got resources, setting up structs */ __IAVF_RESETTING, /* in reset */ + __IAVF_COMM_FAILED, /* communication with PF failed */ /* Below here, watchdog is running */ __IAVF_DOWN, /* ready, can be opened */ __IAVF_DOWN_PENDING, /* descending, waiting for watchdog */ @@ -216,7 +217,6 @@ struct iavf_cloud_filter { /* board specific private data structure */ struct iavf_adapter { - struct timer_list watchdog_timer; struct work_struct reset_task; struct work_struct adminq_task; struct delayed_work client_task; @@ -244,7 +244,7 @@ struct iavf_adapter { int num_iwarp_msix; int iwarp_base_vector; u32 client_pending; - struct i40e_client_instance *cinst; + struct iavf_client_instance *cinst; struct msix_entry *msix_entries; u32 flags; @@ -303,7 +303,7 @@ struct iavf_adapter { enum iavf_state_t state; unsigned long crit_section; - struct work_struct watchdog_task; + struct delayed_work watchdog_task; bool netdev_registered; bool link_up; enum virtchnl_link_speed link_speed; @@ -351,7 +351,7 @@ struct iavf_adapter { /* Ethtool Private Flags */ /* lan device, used by client interface */ -struct i40e_device { +struct iavf_device { struct list_head list; struct iavf_adapter *vf; }; @@ -359,6 +359,7 @@ struct i40e_device { /* needed by iavf_ethtool.c */ extern char iavf_driver_name[]; extern const char iavf_driver_version[]; +extern struct workqueue_struct *iavf_wq; int iavf_up(struct iavf_adapter *adapter); void iavf_down(struct iavf_adapter *adapter); @@ -402,7 +403,7 @@ void iavf_enable_vlan_stripping(struct iavf_adapter *adapter); void iavf_disable_vlan_stripping(struct iavf_adapter *adapter); void iavf_virtchnl_completion(struct iavf_adapter *adapter, enum virtchnl_ops v_opcode, - iavf_status v_retval, u8 *msg, u16 msglen); + enum iavf_status v_retval, u8 *msg, u16 msglen); int iavf_config_rss(struct iavf_adapter *adapter); int iavf_lan_add_device(struct iavf_adapter *adapter); int iavf_lan_del_device(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c index fca1ecfd9f71..9fa3fa99b4c2 100644 --- a/drivers/net/ethernet/intel/iavf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c @@ -4,16 +4,16 @@ #include 
"iavf_status.h" #include "iavf_type.h" #include "iavf_register.h" -#include "i40e_adminq.h" +#include "iavf_adminq.h" #include "iavf_prototype.h" /** - * i40e_adminq_init_regs - Initialize AdminQ registers + * iavf_adminq_init_regs - Initialize AdminQ registers * @hw: pointer to the hardware structure * * This assumes the alloc_asq and alloc_arq functions have already been called **/ -static void i40e_adminq_init_regs(struct iavf_hw *hw) +static void iavf_adminq_init_regs(struct iavf_hw *hw) { /* set head and tail registers in our local struct */ hw->aq.asq.tail = IAVF_VF_ATQT1; @@ -29,24 +29,24 @@ static void i40e_adminq_init_regs(struct iavf_hw *hw) } /** - * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings + * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings * @hw: pointer to the hardware structure **/ -static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw) +static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw) { - iavf_status ret_code; + enum iavf_status ret_code; ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, - i40e_mem_atq_ring, + iavf_mem_atq_ring, (hw->aq.num_asq_entries * - sizeof(struct i40e_aq_desc)), + sizeof(struct iavf_aq_desc)), IAVF_ADMINQ_DESC_ALIGNMENT); if (ret_code) return ret_code; ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, (hw->aq.num_asq_entries * - sizeof(struct i40e_asq_cmd_details))); + sizeof(struct iavf_asq_cmd_details))); if (ret_code) { iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf); return ret_code; @@ -56,55 +56,55 @@ static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw) } /** - * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings + * iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings * @hw: pointer to the hardware structure **/ -static iavf_status i40e_alloc_adminq_arq_ring(struct iavf_hw *hw) +static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw) { - iavf_status ret_code; + enum iavf_status ret_code; ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, - i40e_mem_arq_ring, + iavf_mem_arq_ring, (hw->aq.num_arq_entries * - sizeof(struct i40e_aq_desc)), + sizeof(struct iavf_aq_desc)), IAVF_ADMINQ_DESC_ALIGNMENT); return ret_code; } /** - * i40e_free_adminq_asq - Free Admin Queue send rings + * iavf_free_adminq_asq - Free Admin Queue send rings * @hw: pointer to the hardware structure * * This assumes the posted send buffers have already been cleaned * and de-allocated **/ -static void i40e_free_adminq_asq(struct iavf_hw *hw) +static void iavf_free_adminq_asq(struct iavf_hw *hw) { iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf); } /** - * i40e_free_adminq_arq - Free Admin Queue receive rings + * iavf_free_adminq_arq - Free Admin Queue receive rings * @hw: pointer to the hardware structure * * This assumes the posted receive buffers have already been cleaned * and de-allocated **/ -static void i40e_free_adminq_arq(struct iavf_hw *hw) +static void iavf_free_adminq_arq(struct iavf_hw *hw) { iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf); } /** - * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue + * iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue * @hw: pointer to the hardware structure **/ -static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw) +static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw) { - struct i40e_aq_desc *desc; + struct iavf_aq_desc *desc; struct iavf_dma_mem *bi; - iavf_status ret_code; + enum iavf_status ret_code; int i; /* We'll be allocating the buffer 
info memory first, then we can @@ -123,7 +123,7 @@ static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw) for (i = 0; i < hw->aq.num_arq_entries; i++) { bi = &hw->aq.arq.r.arq_bi[i]; ret_code = iavf_allocate_dma_mem(hw, bi, - i40e_mem_arq_buf, + iavf_mem_arq_buf, hw->aq.arq_buf_size, IAVF_ADMINQ_DESC_ALIGNMENT); if (ret_code) @@ -132,9 +132,9 @@ static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw) /* now configure the descriptors for use */ desc = IAVF_ADMINQ_DESC(hw->aq.arq, i); - desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); - if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) - desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); + desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF) + desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB); desc->opcode = 0; /* This is in accordance with Admin queue design, there is no * register for buffer size configuration @@ -165,13 +165,13 @@ unwind_alloc_arq_bufs: } /** - * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue + * iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue * @hw: pointer to the hardware structure **/ -static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw) +static enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw) { struct iavf_dma_mem *bi; - iavf_status ret_code; + enum iavf_status ret_code; int i; /* No mapped memory needed yet, just the buffer info structures */ @@ -186,7 +186,7 @@ static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw) for (i = 0; i < hw->aq.num_asq_entries; i++) { bi = &hw->aq.asq.r.asq_bi[i]; ret_code = iavf_allocate_dma_mem(hw, bi, - i40e_mem_asq_buf, + iavf_mem_asq_buf, hw->aq.asq_buf_size, IAVF_ADMINQ_DESC_ALIGNMENT); if (ret_code) @@ -206,10 +206,10 @@ unwind_alloc_asq_bufs: } /** - * i40e_free_arq_bufs - Free receive queue buffer info elements + * iavf_free_arq_bufs - Free receive queue buffer info elements * @hw: pointer to the hardware structure **/ -static void i40e_free_arq_bufs(struct iavf_hw *hw) +static void iavf_free_arq_bufs(struct iavf_hw *hw) { int i; @@ -225,10 +225,10 @@ static void i40e_free_arq_bufs(struct iavf_hw *hw) } /** - * i40e_free_asq_bufs - Free send queue buffer info elements + * iavf_free_asq_bufs - Free send queue buffer info elements * @hw: pointer to the hardware structure **/ -static void i40e_free_asq_bufs(struct iavf_hw *hw) +static void iavf_free_asq_bufs(struct iavf_hw *hw) { int i; @@ -248,14 +248,14 @@ static void i40e_free_asq_bufs(struct iavf_hw *hw) } /** - * i40e_config_asq_regs - configure ASQ registers + * iavf_config_asq_regs - configure ASQ registers * @hw: pointer to the hardware structure * * Configure base address and length registers for the transmit queue **/ -static iavf_status i40e_config_asq_regs(struct iavf_hw *hw) +static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw) { - iavf_status ret_code = 0; + enum iavf_status ret_code = 0; u32 reg = 0; /* Clear Head and Tail */ @@ -271,20 +271,20 @@ static iavf_status i40e_config_asq_regs(struct iavf_hw *hw) /* Check one register to verify that config was applied */ reg = rd32(hw, hw->aq.asq.bal); if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa)) - ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR; return ret_code; } /** - * i40e_config_arq_regs - ARQ register configuration + * iavf_config_arq_regs - ARQ register configuration * @hw: pointer to the hardware structure * * Configure base address and length registers for the receive (event queue) **/ -static iavf_status 
i40e_config_arq_regs(struct iavf_hw *hw) +static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw) { - iavf_status ret_code = 0; + enum iavf_status ret_code = 0; u32 reg = 0; /* Clear Head and Tail */ @@ -303,13 +303,13 @@ static iavf_status i40e_config_arq_regs(struct iavf_hw *hw) /* Check one register to verify that config was applied */ reg = rd32(hw, hw->aq.arq.bal); if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa)) - ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR; return ret_code; } /** - * i40e_init_asq - main initialization routine for ASQ + * iavf_init_asq - main initialization routine for ASQ * @hw: pointer to the hardware structure * * This is the main initialization routine for the Admin Send Queue @@ -321,20 +321,20 @@ static iavf_status i40e_config_arq_regs(struct iavf_hw *hw) * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe **/ -static iavf_status i40e_init_asq(struct iavf_hw *hw) +static enum iavf_status iavf_init_asq(struct iavf_hw *hw) { - iavf_status ret_code = 0; + enum iavf_status ret_code = 0; if (hw->aq.asq.count > 0) { /* queue already initialized */ - ret_code = I40E_ERR_NOT_READY; + ret_code = IAVF_ERR_NOT_READY; goto init_adminq_exit; } /* verify input for valid configuration */ if ((hw->aq.num_asq_entries == 0) || (hw->aq.asq_buf_size == 0)) { - ret_code = I40E_ERR_CONFIG; + ret_code = IAVF_ERR_CONFIG; goto init_adminq_exit; } @@ -342,17 +342,17 @@ static iavf_status i40e_init_asq(struct iavf_hw *hw) hw->aq.asq.next_to_clean = 0; /* allocate the ring memory */ - ret_code = i40e_alloc_adminq_asq_ring(hw); + ret_code = iavf_alloc_adminq_asq_ring(hw); if (ret_code) goto init_adminq_exit; /* allocate buffers in the rings */ - ret_code = i40e_alloc_asq_bufs(hw); + ret_code = iavf_alloc_asq_bufs(hw); if (ret_code) goto init_adminq_free_rings; /* initialize base registers */ - ret_code = i40e_config_asq_regs(hw); + ret_code = iavf_config_asq_regs(hw); if (ret_code) goto init_adminq_free_rings; @@ -361,14 +361,14 @@ static iavf_status i40e_init_asq(struct iavf_hw *hw) goto init_adminq_exit; init_adminq_free_rings: - i40e_free_adminq_asq(hw); + iavf_free_adminq_asq(hw); init_adminq_exit: return ret_code; } /** - * i40e_init_arq - initialize ARQ + * iavf_init_arq - initialize ARQ * @hw: pointer to the hardware structure * * The main initialization routine for the Admin Receive (Event) Queue. 
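
iavf_init_asq() above (and iavf_init_arq() next) keep the staged-init shape: allocate the descriptor ring, then the buffers, then program the base registers, unwinding with goto labels on failure. The skeleton, with hypothetical foo_* stages and int standing in for enum iavf_status:

struct foo_hw;

static int foo_alloc_ring(struct foo_hw *hw);
static int foo_alloc_bufs(struct foo_hw *hw);
static int foo_config_regs(struct foo_hw *hw);
static void foo_free_ring(struct foo_hw *hw);

static int foo_init_queue(struct foo_hw *hw)
{
	int ret;

	ret = foo_alloc_ring(hw);	/* stage 1: descriptor ring */
	if (ret)
		goto out;

	ret = foo_alloc_bufs(hw);	/* stage 2: buffers in the ring */
	if (ret)
		goto free_ring;

	ret = foo_config_regs(hw);	/* stage 3: base/length registers */
	if (ret)
		goto free_ring;

	return 0;

free_ring:
	foo_free_ring(hw);		/* undo in reverse order */
out:
	return ret;
}
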
@@ -380,20 +380,20 @@ init_adminq_exit: * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe **/ -static iavf_status i40e_init_arq(struct iavf_hw *hw) +static enum iavf_status iavf_init_arq(struct iavf_hw *hw) { - iavf_status ret_code = 0; + enum iavf_status ret_code = 0; if (hw->aq.arq.count > 0) { /* queue already initialized */ - ret_code = I40E_ERR_NOT_READY; + ret_code = IAVF_ERR_NOT_READY; goto init_adminq_exit; } /* verify input for valid configuration */ if ((hw->aq.num_arq_entries == 0) || (hw->aq.arq_buf_size == 0)) { - ret_code = I40E_ERR_CONFIG; + ret_code = IAVF_ERR_CONFIG; goto init_adminq_exit; } @@ -401,17 +401,17 @@ static iavf_status i40e_init_arq(struct iavf_hw *hw) hw->aq.arq.next_to_clean = 0; /* allocate the ring memory */ - ret_code = i40e_alloc_adminq_arq_ring(hw); + ret_code = iavf_alloc_adminq_arq_ring(hw); if (ret_code) goto init_adminq_exit; /* allocate buffers in the rings */ - ret_code = i40e_alloc_arq_bufs(hw); + ret_code = iavf_alloc_arq_bufs(hw); if (ret_code) goto init_adminq_free_rings; /* initialize base registers */ - ret_code = i40e_config_arq_regs(hw); + ret_code = iavf_config_arq_regs(hw); if (ret_code) goto init_adminq_free_rings; @@ -420,26 +420,26 @@ static iavf_status i40e_init_arq(struct iavf_hw *hw) goto init_adminq_exit; init_adminq_free_rings: - i40e_free_adminq_arq(hw); + iavf_free_adminq_arq(hw); init_adminq_exit: return ret_code; } /** - * i40e_shutdown_asq - shutdown the ASQ + * iavf_shutdown_asq - shutdown the ASQ * @hw: pointer to the hardware structure * * The main shutdown routine for the Admin Send Queue **/ -static iavf_status i40e_shutdown_asq(struct iavf_hw *hw) +static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw) { - iavf_status ret_code = 0; + enum iavf_status ret_code = 0; mutex_lock(&hw->aq.asq_mutex); if (hw->aq.asq.count == 0) { - ret_code = I40E_ERR_NOT_READY; + ret_code = IAVF_ERR_NOT_READY; goto shutdown_asq_out; } @@ -453,7 +453,7 @@ static iavf_status i40e_shutdown_asq(struct iavf_hw *hw) hw->aq.asq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers */ - i40e_free_asq_bufs(hw); + iavf_free_asq_bufs(hw); shutdown_asq_out: mutex_unlock(&hw->aq.asq_mutex); @@ -461,19 +461,19 @@ shutdown_asq_out: } /** - * i40e_shutdown_arq - shutdown ARQ + * iavf_shutdown_arq - shutdown ARQ * @hw: pointer to the hardware structure * * The main shutdown routine for the Admin Receive Queue **/ -static iavf_status i40e_shutdown_arq(struct iavf_hw *hw) +static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw) { - iavf_status ret_code = 0; + enum iavf_status ret_code = 0; mutex_lock(&hw->aq.arq_mutex); if (hw->aq.arq.count == 0) { - ret_code = I40E_ERR_NOT_READY; + ret_code = IAVF_ERR_NOT_READY; goto shutdown_arq_out; } @@ -487,7 +487,7 @@ static iavf_status i40e_shutdown_arq(struct iavf_hw *hw) hw->aq.arq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers */ - i40e_free_arq_bufs(hw); + iavf_free_arq_bufs(hw); shutdown_arq_out: mutex_unlock(&hw->aq.arq_mutex); @@ -505,32 +505,32 @@ shutdown_arq_out: * - hw->aq.arq_buf_size * - hw->aq.asq_buf_size **/ -iavf_status iavf_init_adminq(struct iavf_hw *hw) +enum iavf_status iavf_init_adminq(struct iavf_hw *hw) { - iavf_status ret_code; + enum iavf_status ret_code; /* verify input for valid configuration */ if ((hw->aq.num_arq_entries == 0) || (hw->aq.num_asq_entries == 0) || (hw->aq.arq_buf_size == 0) || (hw->aq.asq_buf_size == 0)) { - ret_code = I40E_ERR_CONFIG; + ret_code = 
IAVF_ERR_CONFIG; goto init_adminq_exit; } /* Set up register offsets */ - i40e_adminq_init_regs(hw); + iavf_adminq_init_regs(hw); /* setup ASQ command write back timeout */ - hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT; + hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT; /* allocate the ASQ */ - ret_code = i40e_init_asq(hw); + ret_code = iavf_init_asq(hw); if (ret_code) goto init_adminq_destroy_locks; /* allocate the ARQ */ - ret_code = i40e_init_arq(hw); + ret_code = iavf_init_arq(hw); if (ret_code) goto init_adminq_free_asq; @@ -538,7 +538,7 @@ iavf_status iavf_init_adminq(struct iavf_hw *hw) goto init_adminq_exit; init_adminq_free_asq: - i40e_shutdown_asq(hw); + iavf_shutdown_asq(hw); init_adminq_destroy_locks: init_adminq_exit: @@ -549,53 +549,53 @@ init_adminq_exit: * iavf_shutdown_adminq - shutdown routine for the Admin Queue * @hw: pointer to the hardware structure **/ -iavf_status iavf_shutdown_adminq(struct iavf_hw *hw) +enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw) { - iavf_status ret_code = 0; + enum iavf_status ret_code = 0; if (iavf_check_asq_alive(hw)) iavf_aq_queue_shutdown(hw, true); - i40e_shutdown_asq(hw); - i40e_shutdown_arq(hw); + iavf_shutdown_asq(hw); + iavf_shutdown_arq(hw); return ret_code; } /** - * i40e_clean_asq - cleans Admin send queue + * iavf_clean_asq - cleans Admin send queue * @hw: pointer to the hardware structure * * returns the number of free desc **/ -static u16 i40e_clean_asq(struct iavf_hw *hw) +static u16 iavf_clean_asq(struct iavf_hw *hw) { struct iavf_adminq_ring *asq = &hw->aq.asq; - struct i40e_asq_cmd_details *details; + struct iavf_asq_cmd_details *details; u16 ntc = asq->next_to_clean; - struct i40e_aq_desc desc_cb; - struct i40e_aq_desc *desc; + struct iavf_aq_desc desc_cb; + struct iavf_aq_desc *desc; desc = IAVF_ADMINQ_DESC(*asq, ntc); - details = I40E_ADMINQ_DETAILS(*asq, ntc); + details = IAVF_ADMINQ_DETAILS(*asq, ntc); while (rd32(hw, hw->aq.asq.head) != ntc) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); if (details->callback) { - I40E_ADMINQ_CALLBACK cb_func = - (I40E_ADMINQ_CALLBACK)details->callback; + IAVF_ADMINQ_CALLBACK cb_func = + (IAVF_ADMINQ_CALLBACK)details->callback; desc_cb = *desc; cb_func(hw, &desc_cb); } - memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + memset((void *)desc, 0, sizeof(struct iavf_aq_desc)); memset((void *)details, 0, - sizeof(struct i40e_asq_cmd_details)); + sizeof(struct iavf_asq_cmd_details)); ntc++; if (ntc == asq->count) ntc = 0; desc = IAVF_ADMINQ_DESC(*asq, ntc); - details = I40E_ADMINQ_DETAILS(*asq, ntc); + details = IAVF_ADMINQ_DETAILS(*asq, ntc); } asq->next_to_clean = ntc; @@ -629,16 +629,17 @@ bool iavf_asq_done(struct iavf_hw *hw) * This is the main send command driver routine for the Admin Queue send * queue. 
It runs the queue, cleans the queue, etc **/ -iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc, - void *buff, /* can be NULL */ - u16 buff_size, - struct i40e_asq_cmd_details *cmd_details) +enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, + struct iavf_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct iavf_asq_cmd_details *cmd_details) { struct iavf_dma_mem *dma_buff = NULL; - struct i40e_asq_cmd_details *details; - struct i40e_aq_desc *desc_on_ring; + struct iavf_asq_cmd_details *details; + struct iavf_aq_desc *desc_on_ring; bool cmd_completed = false; - iavf_status status = 0; + enum iavf_status status = 0; u16 retval = 0; u32 val = 0; @@ -647,21 +648,21 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc, if (hw->aq.asq.count == 0) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: Admin queue not initialized.\n"); - status = I40E_ERR_QUEUE_EMPTY; + status = IAVF_ERR_QUEUE_EMPTY; goto asq_send_command_error; } - hw->aq.asq_last_status = I40E_AQ_RC_OK; + hw->aq.asq_last_status = IAVF_AQ_RC_OK; val = rd32(hw, hw->aq.asq.head); if (val >= hw->aq.num_asq_entries) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: head overrun at %d\n", val); - status = I40E_ERR_QUEUE_EMPTY; + status = IAVF_ERR_QUEUE_EMPTY; goto asq_send_command_error; } - details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); + details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); if (cmd_details) { *details = *cmd_details; @@ -676,7 +677,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc, cpu_to_le32(lower_32_bits(details->cookie)); } } else { - memset(details, 0, sizeof(struct i40e_asq_cmd_details)); + memset(details, 0, sizeof(struct iavf_asq_cmd_details)); } /* clear requested flags and then set additional flags if defined */ @@ -688,7 +689,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc, IAVF_DEBUG_AQ_MESSAGE, "AQTX: Invalid buffer size: %d.\n", buff_size); - status = I40E_ERR_INVALID_SIZE; + status = IAVF_ERR_INVALID_SIZE; goto asq_send_command_error; } @@ -696,7 +697,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc, iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: Async flag not set along with postpone flag"); - status = I40E_ERR_PARAM; + status = IAVF_ERR_PARAM; goto asq_send_command_error; } @@ -707,11 +708,11 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc, /* the clean function called here could be called in a separate thread * in case of asynchronous completions */ - if (i40e_clean_asq(hw) == 0) { + if (iavf_clean_asq(hw) == 0) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: Error queue is full.\n"); - status = I40E_ERR_ADMIN_QUEUE_FULL; + status = IAVF_ERR_ADMIN_QUEUE_FULL; goto asq_send_command_error; } @@ -780,13 +781,13 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc, retval &= 0xff; } cmd_completed = true; - if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) + if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK) status = 0; - else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY) - status = I40E_ERR_NOT_READY; + else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY) + status = IAVF_ERR_NOT_READY; else - status = I40E_ERR_ADMIN_QUEUE_ERROR; - hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; + status = IAVF_ERR_ADMIN_QUEUE_ERROR; + hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval; } 
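
The retval handling in iavf_asq_send_command() above folds the firmware's 8-bit admin-queue return code into the driver status space; only EBUSY gets a distinct, retryable status. The mapping in isolation (assuming the iavf_status/iavf_admin_queue_err enums introduced by this series):

static enum iavf_status foo_map_aq_rc(u16 retval)
{
	switch ((enum iavf_admin_queue_err)(retval & 0xff)) {
	case IAVF_AQ_RC_OK:
		return 0;			/* success */
	case IAVF_AQ_RC_EBUSY:
		return IAVF_ERR_NOT_READY;	/* caller may retry */
	default:
		return IAVF_ERR_ADMIN_QUEUE_ERROR;
	}
}
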
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, @@ -803,11 +804,11 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc, if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: AQ Critical error.\n"); - status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR; + status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR; } else { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: Writeback timeout.\n"); - status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; + status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT; } } @@ -823,12 +824,12 @@ asq_send_command_error: * * Fill the desc with default values **/ -void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode) +void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode) { /* zero out the desc */ - memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + memset((void *)desc, 0, sizeof(struct iavf_aq_desc)); desc->opcode = cpu_to_le16(opcode); - desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI); + desc->flags = cpu_to_le16(IAVF_AQ_FLAG_SI); } /** @@ -841,13 +842,13 @@ void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode) * the contents through e. It can also return how many events are * left to process through 'pending' **/ -iavf_status iavf_clean_arq_element(struct iavf_hw *hw, - struct i40e_arq_event_info *e, - u16 *pending) +enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw, + struct iavf_arq_event_info *e, + u16 *pending) { u16 ntc = hw->aq.arq.next_to_clean; - struct i40e_aq_desc *desc; - iavf_status ret_code = 0; + struct iavf_aq_desc *desc; + enum iavf_status ret_code = 0; struct iavf_dma_mem *bi; u16 desc_idx; u16 datalen; @@ -863,7 +864,7 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw, if (hw->aq.arq.count == 0) { iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: Admin queue not initialized.\n"); - ret_code = I40E_ERR_QUEUE_EMPTY; + ret_code = IAVF_ERR_QUEUE_EMPTY; goto clean_arq_element_err; } @@ -871,7 +872,7 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw, ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK; if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ - ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; + ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK; goto clean_arq_element_out; } @@ -880,10 +881,10 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw, desc_idx = ntc; hw->aq.arq_last_status = - (enum i40e_admin_queue_err)le16_to_cpu(desc->retval); + (enum iavf_admin_queue_err)le16_to_cpu(desc->retval); flags = le16_to_cpu(desc->flags); - if (flags & I40E_AQ_FLAG_ERR) { - ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + if (flags & IAVF_AQ_FLAG_ERR) { + ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR; iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: Event received with error 0x%X.\n", @@ -906,11 +907,11 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw, * size */ bi = &hw->aq.arq.r.arq_bi[ntc]; - memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + memset((void *)desc, 0, sizeof(struct iavf_aq_desc)); - desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); - if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) - desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); + desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF) + desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB); desc->datalen = cpu_to_le16((u16)bi->size); desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa)); desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq.h 
b/drivers/net/ethernet/intel/iavf/iavf_adminq.h index ee983889eab0..baf2fe26f302 100644 --- a/drivers/net/ethernet/intel/iavf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h @@ -6,10 +6,10 @@ #include "iavf_osdep.h" #include "iavf_status.h" -#include "i40e_adminq_cmd.h" +#include "iavf_adminq_cmd.h" #define IAVF_ADMINQ_DESC(R, i) \ - (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i])) + (&(((struct iavf_aq_desc *)((R).desc_buf.va))[i])) #define IAVF_ADMINQ_DESC_ALIGNMENT 4096 @@ -39,22 +39,22 @@ struct iavf_adminq_ring { }; /* ASQ transaction details */ -struct i40e_asq_cmd_details { - void *callback; /* cast from type I40E_ADMINQ_CALLBACK */ +struct iavf_asq_cmd_details { + void *callback; /* cast from type IAVF_ADMINQ_CALLBACK */ u64 cookie; u16 flags_ena; u16 flags_dis; bool async; bool postpone; - struct i40e_aq_desc *wb_desc; + struct iavf_aq_desc *wb_desc; }; -#define I40E_ADMINQ_DETAILS(R, i) \ - (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i])) +#define IAVF_ADMINQ_DETAILS(R, i) \ + (&(((struct iavf_asq_cmd_details *)((R).cmd_buf.va))[i])) /* ARQ event information */ -struct i40e_arq_event_info { - struct i40e_aq_desc desc; +struct iavf_arq_event_info { + struct iavf_aq_desc desc; u16 msg_len; u16 buf_len; u8 *msg_buf; @@ -79,45 +79,45 @@ struct iavf_adminq_info { struct mutex arq_mutex; /* Receive queue lock */ /* last status values on send and receive queues */ - enum i40e_admin_queue_err asq_last_status; - enum i40e_admin_queue_err arq_last_status; + enum iavf_admin_queue_err asq_last_status; + enum iavf_admin_queue_err arq_last_status; }; /** - * i40e_aq_rc_to_posix - convert errors to user-land codes + * iavf_aq_rc_to_posix - convert errors to user-land codes * aq_ret: AdminQ handler error code can override aq_rc * aq_rc: AdminQ firmware error code to convert **/ -static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) +static inline int iavf_aq_rc_to_posix(int aq_ret, int aq_rc) { int aq_to_posix[] = { - 0, /* I40E_AQ_RC_OK */ - -EPERM, /* I40E_AQ_RC_EPERM */ - -ENOENT, /* I40E_AQ_RC_ENOENT */ - -ESRCH, /* I40E_AQ_RC_ESRCH */ - -EINTR, /* I40E_AQ_RC_EINTR */ - -EIO, /* I40E_AQ_RC_EIO */ - -ENXIO, /* I40E_AQ_RC_ENXIO */ - -E2BIG, /* I40E_AQ_RC_E2BIG */ - -EAGAIN, /* I40E_AQ_RC_EAGAIN */ - -ENOMEM, /* I40E_AQ_RC_ENOMEM */ - -EACCES, /* I40E_AQ_RC_EACCES */ - -EFAULT, /* I40E_AQ_RC_EFAULT */ - -EBUSY, /* I40E_AQ_RC_EBUSY */ - -EEXIST, /* I40E_AQ_RC_EEXIST */ - -EINVAL, /* I40E_AQ_RC_EINVAL */ - -ENOTTY, /* I40E_AQ_RC_ENOTTY */ - -ENOSPC, /* I40E_AQ_RC_ENOSPC */ - -ENOSYS, /* I40E_AQ_RC_ENOSYS */ - -ERANGE, /* I40E_AQ_RC_ERANGE */ - -EPIPE, /* I40E_AQ_RC_EFLUSHED */ - -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */ - -EROFS, /* I40E_AQ_RC_EMODE */ - -EFBIG, /* I40E_AQ_RC_EFBIG */ + 0, /* IAVF_AQ_RC_OK */ + -EPERM, /* IAVF_AQ_RC_EPERM */ + -ENOENT, /* IAVF_AQ_RC_ENOENT */ + -ESRCH, /* IAVF_AQ_RC_ESRCH */ + -EINTR, /* IAVF_AQ_RC_EINTR */ + -EIO, /* IAVF_AQ_RC_EIO */ + -ENXIO, /* IAVF_AQ_RC_ENXIO */ + -E2BIG, /* IAVF_AQ_RC_E2BIG */ + -EAGAIN, /* IAVF_AQ_RC_EAGAIN */ + -ENOMEM, /* IAVF_AQ_RC_ENOMEM */ + -EACCES, /* IAVF_AQ_RC_EACCES */ + -EFAULT, /* IAVF_AQ_RC_EFAULT */ + -EBUSY, /* IAVF_AQ_RC_EBUSY */ + -EEXIST, /* IAVF_AQ_RC_EEXIST */ + -EINVAL, /* IAVF_AQ_RC_EINVAL */ + -ENOTTY, /* IAVF_AQ_RC_ENOTTY */ + -ENOSPC, /* IAVF_AQ_RC_ENOSPC */ + -ENOSYS, /* IAVF_AQ_RC_ENOSYS */ + -ERANGE, /* IAVF_AQ_RC_ERANGE */ + -EPIPE, /* IAVF_AQ_RC_EFLUSHED */ + -ESPIPE, /* IAVF_AQ_RC_BAD_ADDR */ + -EROFS, /* IAVF_AQ_RC_EMODE */ + -EFBIG, /* IAVF_AQ_RC_EFBIG */ }; /* aq_rc is invalid 
if AQ timed out */ - if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) + if (aq_ret == IAVF_ERR_ADMIN_QUEUE_TIMEOUT) return -EAGAIN; if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0])))) @@ -127,9 +127,9 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) } /* general information */ -#define I40E_AQ_LARGE_BUF 512 -#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */ +#define IAVF_AQ_LARGE_BUF 512 +#define IAVF_ASQ_CMD_TIMEOUT 250000 /* usecs */ -void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode); +void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode); #endif /* _IAVF_ADMINQ_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h new file mode 100644 index 000000000000..bc512308557b --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h @@ -0,0 +1,528 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _IAVF_ADMINQ_CMD_H_ +#define _IAVF_ADMINQ_CMD_H_ + +/* This header file defines the iavf Admin Queue commands and is shared between + * iavf Firmware and Software. + * + * This file needs to comply with the Linux Kernel coding style. + */ + +#define IAVF_FW_API_VERSION_MAJOR 0x0001 +#define IAVF_FW_API_VERSION_MINOR_X722 0x0005 +#define IAVF_FW_API_VERSION_MINOR_X710 0x0008 + +#define IAVF_FW_MINOR_VERSION(_h) ((_h)->mac.type == IAVF_MAC_XL710 ? \ + IAVF_FW_API_VERSION_MINOR_X710 : \ + IAVF_FW_API_VERSION_MINOR_X722) + +/* API version 1.7 implements additional link and PHY-specific APIs */ +#define IAVF_MINOR_VER_GET_LINK_INFO_XL710 0x0007 + +struct iavf_aq_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + struct { + __le32 param0; + __le32 param1; + __le32 param2; + __le32 param3; + } internal; + struct { + __le32 param0; + __le32 param1; + __le32 addr_high; + __le32 addr_low; + } external; + u8 raw[16]; + } params; +}; + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | + */ + +/* command flags and offsets*/ +#define IAVF_AQ_FLAG_DD_SHIFT 0 +#define IAVF_AQ_FLAG_CMP_SHIFT 1 +#define IAVF_AQ_FLAG_ERR_SHIFT 2 +#define IAVF_AQ_FLAG_VFE_SHIFT 3 +#define IAVF_AQ_FLAG_LB_SHIFT 9 +#define IAVF_AQ_FLAG_RD_SHIFT 10 +#define IAVF_AQ_FLAG_VFC_SHIFT 11 +#define IAVF_AQ_FLAG_BUF_SHIFT 12 +#define IAVF_AQ_FLAG_SI_SHIFT 13 +#define IAVF_AQ_FLAG_EI_SHIFT 14 +#define IAVF_AQ_FLAG_FE_SHIFT 15 + +#define IAVF_AQ_FLAG_DD BIT(IAVF_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define IAVF_AQ_FLAG_CMP BIT(IAVF_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define IAVF_AQ_FLAG_ERR BIT(IAVF_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define IAVF_AQ_FLAG_VFE BIT(IAVF_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define IAVF_AQ_FLAG_LB BIT(IAVF_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define IAVF_AQ_FLAG_RD BIT(IAVF_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define IAVF_AQ_FLAG_VFC BIT(IAVF_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define IAVF_AQ_FLAG_BUF BIT(IAVF_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define IAVF_AQ_FLAG_SI BIT(IAVF_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define IAVF_AQ_FLAG_EI BIT(IAVF_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define IAVF_AQ_FLAG_FE BIT(IAVF_AQ_FLAG_FE_SHIFT) /* 0x8000 */ + +/* error codes */ +enum iavf_admin_queue_err { + IAVF_AQ_RC_OK = 0, /* success */ + IAVF_AQ_RC_EPERM = 1, /* Operation not permitted */ + IAVF_AQ_RC_ENOENT = 2, /* No such element */ + IAVF_AQ_RC_ESRCH = 3, /* Bad opcode */ + IAVF_AQ_RC_EINTR 
= 4, /* operation interrupted */ + IAVF_AQ_RC_EIO = 5, /* I/O error */ + IAVF_AQ_RC_ENXIO = 6, /* No such resource */ + IAVF_AQ_RC_E2BIG = 7, /* Arg too long */ + IAVF_AQ_RC_EAGAIN = 8, /* Try again */ + IAVF_AQ_RC_ENOMEM = 9, /* Out of memory */ + IAVF_AQ_RC_EACCES = 10, /* Permission denied */ + IAVF_AQ_RC_EFAULT = 11, /* Bad address */ + IAVF_AQ_RC_EBUSY = 12, /* Device or resource busy */ + IAVF_AQ_RC_EEXIST = 13, /* object already exists */ + IAVF_AQ_RC_EINVAL = 14, /* Invalid argument */ + IAVF_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + IAVF_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + IAVF_AQ_RC_ENOSYS = 17, /* Function not implemented */ + IAVF_AQ_RC_ERANGE = 18, /* Parameter out of range */ + IAVF_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + IAVF_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + IAVF_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + IAVF_AQ_RC_EFBIG = 22, /* File too large */ +}; + +/* Admin Queue command opcodes */ +enum iavf_admin_queue_opc { + /* aq commands */ + iavf_aqc_opc_get_version = 0x0001, + iavf_aqc_opc_driver_version = 0x0002, + iavf_aqc_opc_queue_shutdown = 0x0003, + iavf_aqc_opc_set_pf_context = 0x0004, + + /* resource ownership */ + iavf_aqc_opc_request_resource = 0x0008, + iavf_aqc_opc_release_resource = 0x0009, + + iavf_aqc_opc_list_func_capabilities = 0x000A, + iavf_aqc_opc_list_dev_capabilities = 0x000B, + + /* Proxy commands */ + iavf_aqc_opc_set_proxy_config = 0x0104, + iavf_aqc_opc_set_ns_proxy_table_entry = 0x0105, + + /* LAA */ + iavf_aqc_opc_mac_address_read = 0x0107, + iavf_aqc_opc_mac_address_write = 0x0108, + + /* PXE */ + iavf_aqc_opc_clear_pxe_mode = 0x0110, + + /* WoL commands */ + iavf_aqc_opc_set_wol_filter = 0x0120, + iavf_aqc_opc_get_wake_reason = 0x0121, + + /* internal switch commands */ + iavf_aqc_opc_get_switch_config = 0x0200, + iavf_aqc_opc_add_statistics = 0x0201, + iavf_aqc_opc_remove_statistics = 0x0202, + iavf_aqc_opc_set_port_parameters = 0x0203, + iavf_aqc_opc_get_switch_resource_alloc = 0x0204, + iavf_aqc_opc_set_switch_config = 0x0205, + iavf_aqc_opc_rx_ctl_reg_read = 0x0206, + iavf_aqc_opc_rx_ctl_reg_write = 0x0207, + + iavf_aqc_opc_add_vsi = 0x0210, + iavf_aqc_opc_update_vsi_parameters = 0x0211, + iavf_aqc_opc_get_vsi_parameters = 0x0212, + + iavf_aqc_opc_add_pv = 0x0220, + iavf_aqc_opc_update_pv_parameters = 0x0221, + iavf_aqc_opc_get_pv_parameters = 0x0222, + + iavf_aqc_opc_add_veb = 0x0230, + iavf_aqc_opc_update_veb_parameters = 0x0231, + iavf_aqc_opc_get_veb_parameters = 0x0232, + + iavf_aqc_opc_delete_element = 0x0243, + + iavf_aqc_opc_add_macvlan = 0x0250, + iavf_aqc_opc_remove_macvlan = 0x0251, + iavf_aqc_opc_add_vlan = 0x0252, + iavf_aqc_opc_remove_vlan = 0x0253, + iavf_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + iavf_aqc_opc_add_tag = 0x0255, + iavf_aqc_opc_remove_tag = 0x0256, + iavf_aqc_opc_add_multicast_etag = 0x0257, + iavf_aqc_opc_remove_multicast_etag = 0x0258, + iavf_aqc_opc_update_tag = 0x0259, + iavf_aqc_opc_add_control_packet_filter = 0x025A, + iavf_aqc_opc_remove_control_packet_filter = 0x025B, + iavf_aqc_opc_add_cloud_filters = 0x025C, + iavf_aqc_opc_remove_cloud_filters = 0x025D, + iavf_aqc_opc_clear_wol_switch_filters = 0x025E, + + iavf_aqc_opc_add_mirror_rule = 0x0260, + iavf_aqc_opc_delete_mirror_rule = 0x0261, + + /* Dynamic Device Personalization */ + iavf_aqc_opc_write_personalization_profile = 0x0270, + iavf_aqc_opc_get_personalization_profile_list = 0x0271, + + /* DCB commands */ + iavf_aqc_opc_dcb_ignore_pfc = 0x0301, + 
iavf_aqc_opc_dcb_updated = 0x0302, + iavf_aqc_opc_set_dcb_parameters = 0x0303, + + /* TX scheduler */ + iavf_aqc_opc_configure_vsi_bw_limit = 0x0400, + iavf_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + iavf_aqc_opc_configure_vsi_tc_bw = 0x0407, + iavf_aqc_opc_query_vsi_bw_config = 0x0408, + iavf_aqc_opc_query_vsi_ets_sla_config = 0x040A, + iavf_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + iavf_aqc_opc_enable_switching_comp_ets = 0x0413, + iavf_aqc_opc_modify_switching_comp_ets = 0x0414, + iavf_aqc_opc_disable_switching_comp_ets = 0x0415, + iavf_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + iavf_aqc_opc_configure_switching_comp_bw_config = 0x0417, + iavf_aqc_opc_query_switching_comp_ets_config = 0x0418, + iavf_aqc_opc_query_port_ets_config = 0x0419, + iavf_aqc_opc_query_switching_comp_bw_config = 0x041A, + iavf_aqc_opc_suspend_port_tx = 0x041B, + iavf_aqc_opc_resume_port_tx = 0x041C, + iavf_aqc_opc_configure_partition_bw = 0x041D, + /* hmc */ + iavf_aqc_opc_query_hmc_resource_profile = 0x0500, + iavf_aqc_opc_set_hmc_resource_profile = 0x0501, + + /* phy commands*/ + iavf_aqc_opc_get_phy_abilities = 0x0600, + iavf_aqc_opc_set_phy_config = 0x0601, + iavf_aqc_opc_set_mac_config = 0x0603, + iavf_aqc_opc_set_link_restart_an = 0x0605, + iavf_aqc_opc_get_link_status = 0x0607, + iavf_aqc_opc_set_phy_int_mask = 0x0613, + iavf_aqc_opc_get_local_advt_reg = 0x0614, + iavf_aqc_opc_set_local_advt_reg = 0x0615, + iavf_aqc_opc_get_partner_advt = 0x0616, + iavf_aqc_opc_set_lb_modes = 0x0618, + iavf_aqc_opc_get_phy_wol_caps = 0x0621, + iavf_aqc_opc_set_phy_debug = 0x0622, + iavf_aqc_opc_upload_ext_phy_fm = 0x0625, + iavf_aqc_opc_run_phy_activity = 0x0626, + iavf_aqc_opc_set_phy_register = 0x0628, + iavf_aqc_opc_get_phy_register = 0x0629, + + /* NVM commands */ + iavf_aqc_opc_nvm_read = 0x0701, + iavf_aqc_opc_nvm_erase = 0x0702, + iavf_aqc_opc_nvm_update = 0x0703, + iavf_aqc_opc_nvm_config_read = 0x0704, + iavf_aqc_opc_nvm_config_write = 0x0705, + iavf_aqc_opc_oem_post_update = 0x0720, + iavf_aqc_opc_thermal_sensor = 0x0721, + + /* virtualization commands */ + iavf_aqc_opc_send_msg_to_pf = 0x0801, + iavf_aqc_opc_send_msg_to_vf = 0x0802, + iavf_aqc_opc_send_msg_to_peer = 0x0803, + + /* alternate structure */ + iavf_aqc_opc_alternate_write = 0x0900, + iavf_aqc_opc_alternate_write_indirect = 0x0901, + iavf_aqc_opc_alternate_read = 0x0902, + iavf_aqc_opc_alternate_read_indirect = 0x0903, + iavf_aqc_opc_alternate_write_done = 0x0904, + iavf_aqc_opc_alternate_set_mode = 0x0905, + iavf_aqc_opc_alternate_clear_port = 0x0906, + + /* LLDP commands */ + iavf_aqc_opc_lldp_get_mib = 0x0A00, + iavf_aqc_opc_lldp_update_mib = 0x0A01, + iavf_aqc_opc_lldp_add_tlv = 0x0A02, + iavf_aqc_opc_lldp_update_tlv = 0x0A03, + iavf_aqc_opc_lldp_delete_tlv = 0x0A04, + iavf_aqc_opc_lldp_stop = 0x0A05, + iavf_aqc_opc_lldp_start = 0x0A06, + + /* Tunnel commands */ + iavf_aqc_opc_add_udp_tunnel = 0x0B00, + iavf_aqc_opc_del_udp_tunnel = 0x0B01, + iavf_aqc_opc_set_rss_key = 0x0B02, + iavf_aqc_opc_set_rss_lut = 0x0B03, + iavf_aqc_opc_get_rss_key = 0x0B04, + iavf_aqc_opc_get_rss_lut = 0x0B05, + + /* Async Events */ + iavf_aqc_opc_event_lan_overflow = 0x1001, + + /* OEM commands */ + iavf_aqc_opc_oem_parameter_change = 0xFE00, + iavf_aqc_opc_oem_device_status_change = 0xFE01, + iavf_aqc_opc_oem_ocsd_initialize = 0xFE02, + iavf_aqc_opc_oem_ocbb_initialize = 0xFE03, + + /* debug commands */ + iavf_aqc_opc_debug_read_reg = 0xFF03, + iavf_aqc_opc_debug_write_reg = 0xFF04, + iavf_aqc_opc_debug_modify_reg = 0xFF07, + 
iavf_aqc_opc_debug_dump_internals = 0xFF08, +}; + +/* command structures and indirect data structures */ + +/* Structure naming conventions: + * - no suffix for direct command descriptor structures + * - _data for indirect sent data + * - _resp for indirect return data (data which is both will use _data) + * - _completion for direct return data + * - _element_ for repeated elements (may also be _data or _resp) + * + * Command structures are expected to overlay the params.raw member of the basic + * descriptor, and as such cannot exceed 16 bytes in length. + */ + +/* This macro is used to generate a compilation error if a structure + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define IAVF_CHECK_STRUCT_LEN(n, X) enum iavf_static_assert_enum_##X \ + { iavf_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) } + +/* This macro is used extensively to ensure that command structures are 16 + * bytes in length as they have to map to the raw array of that size. + */ +#define IAVF_CHECK_CMD_LENGTH(X) IAVF_CHECK_STRUCT_LEN(16, X) + +/* Queue Shutdown (direct 0x0003) */ +struct iavf_aqc_queue_shutdown { + __le32 driver_unloading; +#define IAVF_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; +}; + +IAVF_CHECK_CMD_LENGTH(iavf_aqc_queue_shutdown); + +struct iavf_aqc_vsi_properties_data { + /* first 96 byte are written by SW */ + __le16 valid_sections; +#define IAVF_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define IAVF_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define IAVF_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define IAVF_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define IAVF_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define IAVF_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define IAVF_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define IAVF_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define IAVF_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define IAVF_AQ_VSI_PROP_SCHED_VALID 0x0200 + /* switch section */ + __le16 switch_id; /* 12bit id combined with flags below */ +#define IAVF_AQ_VSI_SW_ID_SHIFT 0x0000 +#define IAVF_AQ_VSI_SW_ID_MASK (0xFFF << IAVF_AQ_VSI_SW_ID_SHIFT) +#define IAVF_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define IAVF_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 +#define IAVF_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; + /* security section */ + u8 sec_flags; +#define IAVF_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define IAVF_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define IAVF_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; + /* VLAN section */ + __le16 pvid; /* VLANS include priority bits */ + __le16 fcoe_pvid; + u8 port_vlan_flags; +#define IAVF_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define IAVF_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + IAVF_AQ_VSI_PVLAN_MODE_SHIFT) +#define IAVF_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define IAVF_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define IAVF_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define IAVF_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define IAVF_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define IAVF_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + IAVF_AQ_VSI_PVLAN_EMOD_SHIFT) +#define IAVF_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define IAVF_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define IAVF_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define IAVF_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; + /* ingress egress up sections */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define IAVF_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define IAVF_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + IAVF_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define IAVF_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define 
IAVF_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + IAVF_AQ_VSI_UP_TABLE_UP1_SHIFT) +#define IAVF_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define IAVF_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + IAVF_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define IAVF_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define IAVF_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + IAVF_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define IAVF_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define IAVF_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + IAVF_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define IAVF_AQ_VSI_UP_TABLE_UP5_SHIFT 15 +#define IAVF_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + IAVF_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define IAVF_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define IAVF_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + IAVF_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define IAVF_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define IAVF_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + IAVF_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ + /* cascaded PV section */ + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define IAVF_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define IAVF_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + IAVF_AQ_VSI_CAS_PV_TAGX_SHIFT) +#define IAVF_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 +#define IAVF_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define IAVF_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define IAVF_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define IAVF_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define IAVF_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 + u8 cas_pv_reserved; + /* queue mapping section */ + __le16 mapping_flags; +#define IAVF_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define IAVF_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define IAVF_AQ_VSI_QUEUE_SHIFT 0x0 +#define IAVF_AQ_VSI_QUEUE_MASK (0x7FF << IAVF_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define IAVF_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define IAVF_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + IAVF_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define IAVF_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define IAVF_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + IAVF_AQ_VSI_TC_QUE_NUMBER_SHIFT) + /* queueing option section */ + u8 queueing_opt_flags; +#define IAVF_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 +#define IAVF_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 +#define IAVF_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define IAVF_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 +#define IAVF_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 +#define IAVF_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 + u8 queueing_opt_reserved[3]; + /* scheduler section */ + u8 up_enable_bits; + u8 sched_reserved; + /* outer up section */ + __le32 outer_up_table; /* same structure and defines as ingress tbl */ + u8 cmd_reserved[8]; + /* last 32 bytes are written by FW */ + __le16 qs_handle[8]; +#define IAVF_AQ_VSI_QS_HANDLE_INVALID 0xFFFF + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; +}; + +IAVF_CHECK_STRUCT_LEN(128, iavf_aqc_vsi_properties_data); + +/* Get VEB Parameters (direct 0x0232) + * uses iavf_aqc_switch_seid for the descriptor + */ +struct iavf_aqc_get_veb_parameters_completion { + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; +}; + +IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_veb_parameters_completion); + +#define IAVF_LINK_SPEED_100MB_SHIFT 0x1 +#define IAVF_LINK_SPEED_1000MB_SHIFT 0x2 +#define IAVF_LINK_SPEED_10GB_SHIFT 0x3 +#define IAVF_LINK_SPEED_40GB_SHIFT 0x4 +#define IAVF_LINK_SPEED_20GB_SHIFT 0x5 +#define IAVF_LINK_SPEED_25GB_SHIFT 0x6 + +enum iavf_aq_link_speed { + IAVF_LINK_SPEED_UNKNOWN = 0, + IAVF_LINK_SPEED_100MB = BIT(IAVF_LINK_SPEED_100MB_SHIFT), + IAVF_LINK_SPEED_1GB = BIT(IAVF_LINK_SPEED_1000MB_SHIFT), 
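The IAVF_LINK_SPEED_* values being defined here are one-hot bit flags, BIT(shift), rather than plain integers, so a firmware-reported speed can be tested with a mask; the remaining enum entries resume after this sketch. A minimal decode of such a flag into Mbps, using demo_* stand-in names and a local DEMO_BIT() in place of the kernel's BIT():

#include <stdint.h>

#define DEMO_BIT(n)	(1u << (n))

enum demo_link_speed {
	DEMO_LINK_SPEED_100MB = DEMO_BIT(0x1),
	DEMO_LINK_SPEED_1GB   = DEMO_BIT(0x2),
	DEMO_LINK_SPEED_10GB  = DEMO_BIT(0x3),
	DEMO_LINK_SPEED_40GB  = DEMO_BIT(0x4),
};

static uint32_t demo_speed_to_mbps(uint32_t speed_flag)
{
	switch (speed_flag) {
	case DEMO_LINK_SPEED_100MB:	return 100;
	case DEMO_LINK_SPEED_1GB:	return 1000;
	case DEMO_LINK_SPEED_10GB:	return 10000;
	case DEMO_LINK_SPEED_40GB:	return 40000;
	default:			return 0;	/* LINK_SPEED_UNKNOWN */
	}
}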
+ IAVF_LINK_SPEED_10GB = BIT(IAVF_LINK_SPEED_10GB_SHIFT), + IAVF_LINK_SPEED_40GB = BIT(IAVF_LINK_SPEED_40GB_SHIFT), + IAVF_LINK_SPEED_20GB = BIT(IAVF_LINK_SPEED_20GB_SHIFT), + IAVF_LINK_SPEED_25GB = BIT(IAVF_LINK_SPEED_25GB_SHIFT), +}; + +/* Send to PF command (indirect 0x0801) id is only used by PF + * Send to VF command (indirect 0x0802) id is only used by PF + * Send to Peer PF command (indirect 0x0803) + */ +struct iavf_aqc_pf_vf_message { + __le32 id; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +IAVF_CHECK_CMD_LENGTH(iavf_aqc_pf_vf_message); + +struct iavf_aqc_get_set_rss_key { +#define IAVF_AQC_SET_RSS_KEY_VSI_VALID BIT(15) +#define IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 +#define IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ + IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) + __le16 vsi_id; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_set_rss_key); + +struct iavf_aqc_get_set_rss_key_data { + u8 standard_rss_key[0x28]; + u8 extended_hash_key[0xc]; +}; + +IAVF_CHECK_STRUCT_LEN(0x34, iavf_aqc_get_set_rss_key_data); + +struct iavf_aqc_get_set_rss_lut { +#define IAVF_AQC_SET_RSS_LUT_VSI_VALID BIT(15) +#define IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 +#define IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ + IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) + __le16 vsi_id; +#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 +#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \ + BIT(IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) + +#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 +#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 + __le16 flags; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_set_rss_lut); +#endif /* _IAVF_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_alloc.h b/drivers/net/ethernet/intel/iavf/iavf_alloc.h index bf2753146f30..2711573c14ec 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_alloc.h +++ b/drivers/net/ethernet/intel/iavf/iavf_alloc.h @@ -20,12 +20,15 @@ enum iavf_memory_type { }; /* prototype for functions used for dynamic memory allocation */ -iavf_status iavf_allocate_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem, - enum iavf_memory_type type, - u64 size, u32 alignment); -iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem); -iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw, - struct iavf_virt_mem *mem, u32 size); -iavf_status iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem); +enum iavf_status iavf_allocate_dma_mem(struct iavf_hw *hw, + struct iavf_dma_mem *mem, + enum iavf_memory_type type, + u64 size, u32 alignment); +enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw, + struct iavf_dma_mem *mem); +enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw, + struct iavf_virt_mem *mem, u32 size); +enum iavf_status iavf_free_virt_mem(struct iavf_hw *hw, + struct iavf_virt_mem *mem); #endif /* _IAVF_ALLOC_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c index aea45364fd1c..0c77e4171808 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_client.c +++ b/drivers/net/ethernet/intel/iavf/iavf_client.c @@ -10,19 +10,19 @@ static const char iavf_client_interface_version_str[] = IAVF_CLIENT_VERSION_STR; -static struct i40e_client *vf_registered_client; -static LIST_HEAD(i40e_devices); +static struct iavf_client *vf_registered_client; +static LIST_HEAD(iavf_devices); static DEFINE_MUTEX(iavf_device_mutex); -static u32 iavf_client_virtchnl_send(struct i40e_info *ldev, - 
struct i40e_client *client, +static u32 iavf_client_virtchnl_send(struct iavf_info *ldev, + struct iavf_client *client, u8 *msg, u16 len); -static int iavf_client_setup_qvlist(struct i40e_info *ldev, - struct i40e_client *client, - struct i40e_qvlist_info *qvlist_info); +static int iavf_client_setup_qvlist(struct iavf_info *ldev, + struct iavf_client *client, + struct iavf_qvlist_info *qvlist_info); -static struct i40e_ops iavf_lan_ops = { +static struct iavf_ops iavf_lan_ops = { .virtchnl_send = iavf_client_virtchnl_send, .setup_qvlist = iavf_client_setup_qvlist, }; @@ -33,11 +33,11 @@ static struct i40e_ops iavf_lan_ops = { * @params: client param struct **/ static -void iavf_client_get_params(struct iavf_vsi *vsi, struct i40e_params *params) +void iavf_client_get_params(struct iavf_vsi *vsi, struct iavf_params *params) { int i; - memset(params, 0, sizeof(struct i40e_params)); + memset(params, 0, sizeof(struct iavf_params)); params->mtu = vsi->netdev->mtu; params->link_up = vsi->back->link_up; @@ -57,7 +57,7 @@ void iavf_client_get_params(struct iavf_vsi *vsi, struct i40e_params *params) **/ void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len) { - struct i40e_client_instance *cinst; + struct iavf_client_instance *cinst; if (!vsi) return; @@ -81,8 +81,8 @@ void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len) **/ void iavf_notify_client_l2_params(struct iavf_vsi *vsi) { - struct i40e_client_instance *cinst; - struct i40e_params params; + struct iavf_client_instance *cinst; + struct iavf_params params; if (!vsi) return; @@ -110,7 +110,7 @@ void iavf_notify_client_l2_params(struct iavf_vsi *vsi) void iavf_notify_client_open(struct iavf_vsi *vsi) { struct iavf_adapter *adapter = vsi->back; - struct i40e_client_instance *cinst = adapter->cinst; + struct iavf_client_instance *cinst = adapter->cinst; int ret; if (!cinst || !cinst->client || !cinst->client->ops || @@ -119,10 +119,10 @@ void iavf_notify_client_open(struct iavf_vsi *vsi) "Cannot locate client instance open function\n"); return; } - if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state))) { + if (!(test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state))) { ret = cinst->client->ops->open(&cinst->lan_info, cinst->client); if (!ret) - set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state); + set_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state); } } @@ -132,17 +132,17 @@ void iavf_notify_client_open(struct iavf_vsi *vsi) * * Return 0 on success or < 0 on error **/ -static int iavf_client_release_qvlist(struct i40e_info *ldev) +static int iavf_client_release_qvlist(struct iavf_info *ldev) { struct iavf_adapter *adapter = ldev->vf; - iavf_status err; + enum iavf_status err; if (adapter->aq_required) return -EAGAIN; err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, - I40E_SUCCESS, NULL, 0, NULL); + IAVF_SUCCESS, NULL, 0, NULL); if (err) dev_err(&adapter->pdev->dev, @@ -162,7 +162,7 @@ static int iavf_client_release_qvlist(struct i40e_info *ldev) void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset) { struct iavf_adapter *adapter = vsi->back; - struct i40e_client_instance *cinst = adapter->cinst; + struct iavf_client_instance *cinst = adapter->cinst; if (!cinst || !cinst->client || !cinst->client->ops || !cinst->client->ops->close) { @@ -172,7 +172,7 @@ void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset) } cinst->client->ops->close(&cinst->lan_info, cinst->client, reset); iavf_client_release_qvlist(&cinst->lan_info); - 
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state); + clear_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state); } /** @@ -181,13 +181,13 @@ void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset) * * Returns cinst ptr on success, NULL on failure **/ -static struct i40e_client_instance * +static struct iavf_client_instance * iavf_client_add_instance(struct iavf_adapter *adapter) { - struct i40e_client_instance *cinst = NULL; + struct iavf_client_instance *cinst = NULL; struct iavf_vsi *vsi = &adapter->vsi; struct netdev_hw_addr *mac = NULL; - struct i40e_params params; + struct iavf_params params; if (!vf_registered_client) goto out; @@ -205,7 +205,7 @@ iavf_client_add_instance(struct iavf_adapter *adapter) cinst->lan_info.netdev = vsi->netdev; cinst->lan_info.pcidev = adapter->pdev; cinst->lan_info.fid = 0; - cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF; + cinst->lan_info.ftype = IAVF_CLIENT_FTYPE_VF; cinst->lan_info.hw_addr = adapter->hw.hw_addr; cinst->lan_info.ops = &iavf_lan_ops; cinst->lan_info.version.major = IAVF_CLIENT_VERSION_MAJOR; @@ -213,7 +213,7 @@ iavf_client_add_instance(struct iavf_adapter *adapter) cinst->lan_info.version.build = IAVF_CLIENT_VERSION_BUILD; iavf_client_get_params(vsi, ¶ms); cinst->lan_info.params = params; - set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state); + set_bit(__IAVF_CLIENT_INSTANCE_NONE, &cinst->state); cinst->lan_info.msix_count = adapter->num_iwarp_msix; cinst->lan_info.msix_entries = @@ -250,8 +250,8 @@ void iavf_client_del_instance(struct iavf_adapter *adapter) **/ void iavf_client_subtask(struct iavf_adapter *adapter) { - struct i40e_client *client = vf_registered_client; - struct i40e_client_instance *cinst; + struct iavf_client *client = vf_registered_client; + struct iavf_client_instance *cinst; int ret = 0; if (adapter->state < __IAVF_DOWN) @@ -269,13 +269,13 @@ void iavf_client_subtask(struct iavf_adapter *adapter) dev_info(&adapter->pdev->dev, "Added instance of Client %s\n", client->name); - if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) { + if (!test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state)) { /* Send an Open request to the client */ if (client->ops && client->ops->open) ret = client->ops->open(&cinst->lan_info, client); if (!ret) - set_bit(__I40E_CLIENT_INSTANCE_OPENED, + set_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state); else /* remove client instance */ @@ -291,11 +291,11 @@ void iavf_client_subtask(struct iavf_adapter *adapter) **/ int iavf_lan_add_device(struct iavf_adapter *adapter) { - struct i40e_device *ldev; + struct iavf_device *ldev; int ret = 0; mutex_lock(&iavf_device_mutex); - list_for_each_entry(ldev, &i40e_devices, list) { + list_for_each_entry(ldev, &iavf_devices, list) { if (ldev->vf == adapter) { ret = -EEXIST; goto out; @@ -308,7 +308,7 @@ int iavf_lan_add_device(struct iavf_adapter *adapter) } ldev->vf = adapter; INIT_LIST_HEAD(&ldev->list); - list_add(&ldev->list, &i40e_devices); + list_add(&ldev->list, &iavf_devices); dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", adapter->hw.bus.bus_id, adapter->hw.bus.device, adapter->hw.bus.func); @@ -331,11 +331,11 @@ out: **/ int iavf_lan_del_device(struct iavf_adapter *adapter) { - struct i40e_device *ldev, *tmp; + struct iavf_device *ldev, *tmp; int ret = -ENODEV; mutex_lock(&iavf_device_mutex); - list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) { + list_for_each_entry_safe(ldev, tmp, &iavf_devices, list) { if (ldev->vf == adapter) { dev_info(&adapter->pdev->dev, "Deleted LAN device 
bus=0x%02x dev=0x%02x func=0x%02x\n", @@ -357,24 +357,24 @@ int iavf_lan_del_device(struct iavf_adapter *adapter) * @client: pointer to the registered client * **/ -static void iavf_client_release(struct i40e_client *client) +static void iavf_client_release(struct iavf_client *client) { - struct i40e_client_instance *cinst; - struct i40e_device *ldev; + struct iavf_client_instance *cinst; + struct iavf_device *ldev; struct iavf_adapter *adapter; mutex_lock(&iavf_device_mutex); - list_for_each_entry(ldev, &i40e_devices, list) { + list_for_each_entry(ldev, &iavf_devices, list) { adapter = ldev->vf; cinst = adapter->cinst; if (!cinst) continue; - if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) { + if (test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state)) { if (client->ops && client->ops->close) client->ops->close(&cinst->lan_info, client, false); iavf_client_release_qvlist(&cinst->lan_info); - clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state); + clear_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state); dev_warn(&adapter->pdev->dev, "Client %s instance closed\n", client->name); @@ -392,13 +392,13 @@ static void iavf_client_release(struct i40e_client *client) * @client: pointer to the registered client * **/ -static void iavf_client_prepare(struct i40e_client *client) +static void iavf_client_prepare(struct iavf_client *client) { - struct i40e_device *ldev; + struct iavf_device *ldev; struct iavf_adapter *adapter; mutex_lock(&iavf_device_mutex); - list_for_each_entry(ldev, &i40e_devices, list) { + list_for_each_entry(ldev, &iavf_devices, list) { adapter = ldev->vf; /* Signal the watchdog to service the client */ adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; @@ -415,18 +415,18 @@ static void iavf_client_prepare(struct i40e_client *client) * * Return 0 on success or < 0 on error **/ -static u32 iavf_client_virtchnl_send(struct i40e_info *ldev, - struct i40e_client *client, +static u32 iavf_client_virtchnl_send(struct iavf_info *ldev, + struct iavf_client *client, u8 *msg, u16 len) { struct iavf_adapter *adapter = ldev->vf; - iavf_status err; + enum iavf_status err; if (adapter->aq_required) return -EAGAIN; err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP, - I40E_SUCCESS, msg, len, NULL); + IAVF_SUCCESS, msg, len, NULL); if (err) dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n", err, adapter->hw.aq.asq_last_status); @@ -442,16 +442,16 @@ static u32 iavf_client_virtchnl_send(struct i40e_info *ldev, * * Return 0 on success or < 0 on error **/ -static int iavf_client_setup_qvlist(struct i40e_info *ldev, - struct i40e_client *client, - struct i40e_qvlist_info *qvlist_info) +static int iavf_client_setup_qvlist(struct iavf_info *ldev, + struct iavf_client *client, + struct iavf_qvlist_info *qvlist_info) { struct virtchnl_iwarp_qvlist_info *v_qvlist_info; struct iavf_adapter *adapter = ldev->vf; - struct i40e_qv_info *qv_info; - iavf_status err; + struct iavf_qv_info *qv_info; + enum iavf_status err; u32 v_idx, i; - u32 msg_size; + size_t msg_size; if (adapter->aq_required) return -EAGAIN; @@ -469,13 +469,12 @@ static int iavf_client_setup_qvlist(struct i40e_info *ldev, } v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info; - msg_size = sizeof(struct virtchnl_iwarp_qvlist_info) + - (sizeof(struct virtchnl_iwarp_qv_info) * - (v_qvlist_info->num_vectors - 1)); + msg_size = struct_size(v_qvlist_info, qv_info, + v_qvlist_info->num_vectors - 1); adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP); err = 
iavf_aq_send_msg_to_pf(&adapter->hw, - VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, I40E_SUCCESS, + VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, IAVF_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL); if (err) { @@ -499,12 +498,12 @@ out: } /** - * iavf_register_client - Register a i40e client driver with the L2 driver - * @client: pointer to the i40e_client struct + * iavf_register_client - Register an iavf client driver with the L2 driver + * @client: pointer to the iavf_client struct * * Returns 0 on success or non-0 on error **/ -int iavf_register_client(struct i40e_client *client) +int iavf_register_client(struct iavf_client *client) { int ret = 0; @@ -550,12 +549,12 @@ out: EXPORT_SYMBOL(iavf_register_client); /** - * iavf_unregister_client - Unregister a i40e client driver with the L2 driver - * @client: pointer to the i40e_client struct + * iavf_unregister_client - Unregister an iavf client driver with the L2 driver + * @client: pointer to the iavf_client struct * * Returns 0 on success or non-0 on error **/ -int iavf_unregister_client(struct i40e_client *client) +int iavf_unregister_client(struct iavf_client *client) { int ret = 0; diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.h b/drivers/net/ethernet/intel/iavf/iavf_client.h index e216fc9dfd81..9a7cf39ea75a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_client.h +++ b/drivers/net/ethernet/intel/iavf/iavf_client.h @@ -17,86 +17,86 @@ __stringify(IAVF_CLIENT_VERSION_MINOR) "." \ __stringify(IAVF_CLIENT_VERSION_BUILD) -struct i40e_client_version { +struct iavf_client_version { u8 major; u8 minor; u8 build; u8 rsvd; }; -enum i40e_client_state { - __I40E_CLIENT_NULL, - __I40E_CLIENT_REGISTERED +enum iavf_client_state { + __IAVF_CLIENT_NULL, + __IAVF_CLIENT_REGISTERED }; -enum i40e_client_instance_state { - __I40E_CLIENT_INSTANCE_NONE, - __I40E_CLIENT_INSTANCE_OPENED, +enum iavf_client_instance_state { + __IAVF_CLIENT_INSTANCE_NONE, + __IAVF_CLIENT_INSTANCE_OPENED, }; -struct i40e_ops; -struct i40e_client; +struct iavf_ops; +struct iavf_client; /* HW does not define a type value for AEQ; only for RX/TX and CEQ. * In order for us to keep the interface simple, SW will define a * unique type value for AEQ. */
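The iavf_qvlist_info type declared just below uses the old one-element trailing-array idiom, which is why the earlier iavf_client.c hunk sizes its virtchnl message with struct_size(v_qvlist_info, qv_info, num_vectors - 1): the first element is already counted in sizeof(). A user-space sketch of the same arithmetic, with demo_* stand-in types; the kernel's struct_size() additionally guards the multiplication against overflow, which this sketch does not:

#include <stdlib.h>

struct demo_qv_info {
	unsigned int v_idx;		/* msix vector */
	unsigned short ceq_idx, aeq_idx;
};

struct demo_qvlist_info {
	unsigned int num_vectors;
	struct demo_qv_info qv_info[1];	/* really num_vectors entries */
};

/* Same arithmetic as struct_size(q, qv_info, num_vectors - 1): the one
 * declared element is already inside sizeof(struct demo_qvlist_info).
 * Callers must pass num_vectors >= 1. */
static size_t demo_qvlist_size(unsigned int num_vectors)
{
	return sizeof(struct demo_qvlist_info) +
	       (size_t)(num_vectors - 1) * sizeof(struct demo_qv_info);
}

static struct demo_qvlist_info *demo_alloc_qvlist(unsigned int num_vectors)
{
	struct demo_qvlist_info *q = calloc(1, demo_qvlist_size(num_vectors));

	if (q)
		q->num_vectors = num_vectors;
	return q;
}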
-#define I40E_QUEUE_TYPE_PE_AEQ 0x80 -#define I40E_QUEUE_INVALID_IDX 0xFFFF +#define IAVF_QUEUE_TYPE_PE_AEQ 0x80 +#define IAVF_QUEUE_INVALID_IDX 0xFFFF -struct i40e_qv_info { +struct iavf_qv_info { u32 v_idx; /* msix_vector */ u16 ceq_idx; u16 aeq_idx; u8 itr_idx; }; -struct i40e_qvlist_info { +struct iavf_qvlist_info { u32 num_vectors; - struct i40e_qv_info qv_info[1]; + struct iavf_qv_info qv_info[1]; }; -#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF +#define IAVF_CLIENT_MSIX_ALL 0xFFFFFFFF /* set of LAN parameters useful for clients managed by LAN */ /* Struct to hold per priority info */ -struct i40e_prio_qos_params { +struct iavf_prio_qos_params { u16 qs_handle; /* qs handle for prio */ u8 tc; /* TC mapped to prio */ u8 reserved; }; -#define I40E_CLIENT_MAX_USER_PRIORITY 8 +#define IAVF_CLIENT_MAX_USER_PRIORITY 8 /* Struct to hold Client QoS */ -struct i40e_qos_params { - struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY]; +struct iavf_qos_params { + struct iavf_prio_qos_params prio_qos[IAVF_CLIENT_MAX_USER_PRIORITY]; }; -struct i40e_params { - struct i40e_qos_params qos; +struct iavf_params { + struct iavf_qos_params qos; u16 mtu; u16 link_up; /* boolean */ }; /* Structure to hold LAN device info for a client device */ -struct i40e_info { - struct i40e_client_version version; +struct iavf_info { + struct iavf_client_version version; u8 lanmac[6]; struct net_device *netdev; struct pci_dev *pcidev; u8 __iomem *hw_addr; u8 fid; /* function id, PF id or VF id */ -#define I40E_CLIENT_FTYPE_PF 0 -#define I40E_CLIENT_FTYPE_VF 1 +#define IAVF_CLIENT_FTYPE_PF 0 +#define IAVF_CLIENT_FTYPE_VF 1 u8 ftype; /* function type, PF or VF */ void *vf; /* cast to iavf_adapter */ /* All L2 params that could change during the life span of the device * and need to be communicated to the client when they change */ - struct i40e_params params; - struct i40e_ops *ops; + struct iavf_params params; + struct iavf_ops *ops; u16 msix_count; /* number of msix vectors */ /* Array down below will be dynamically allocated based on msix_count */ @@ -104,66 +104,66 @@ struct i40e_info { u16 itr_index; /* Which ITR index the PE driver is supposed to use */ }; -struct i40e_ops { +struct iavf_ops { /* setup_q_vector_list enables queues with a particular vector */ - int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client, - struct i40e_qvlist_info *qv_info); + int (*setup_qvlist)(struct iavf_info *ldev, struct iavf_client *client, + struct iavf_qvlist_info *qv_info); - u32 (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client, + u32 (*virtchnl_send)(struct iavf_info *ldev, struct iavf_client *client, u8 *msg, u16 len); /* If the PE Engine is unresponsive, the RDMA driver can request a reset. */ - void (*request_reset)(struct i40e_info *ldev, - struct i40e_client *client); + void (*request_reset)(struct iavf_info *ldev, + struct iavf_client *client); }; -struct i40e_client_ops { +struct iavf_client_ops { /* Should be called from register_client() or whenever the driver is * ready to create a specific client instance. */ - int (*open)(struct i40e_info *ldev, struct i40e_client *client); + int (*open)(struct iavf_info *ldev, struct iavf_client *client); /* Should be closed when netdev is unavailable or when unregister * call comes in. If the close happens due to a reset, set the reset * bit to true.
*/ - void (*close)(struct i40e_info *ldev, struct i40e_client *client, + void (*close)(struct iavf_info *ldev, struct iavf_client *client, bool reset); /* called when some l2 managed parameters changes - mss */ - void (*l2_param_change)(struct i40e_info *ldev, - struct i40e_client *client, - struct i40e_params *params); + void (*l2_param_change)(struct iavf_info *ldev, + struct iavf_client *client, + struct iavf_params *params); /* called when a message is received from the PF */ - int (*virtchnl_receive)(struct i40e_info *ldev, - struct i40e_client *client, + int (*virtchnl_receive)(struct iavf_info *ldev, + struct iavf_client *client, u8 *msg, u16 len); }; /* Client device */ -struct i40e_client_instance { +struct iavf_client_instance { struct list_head list; - struct i40e_info lan_info; - struct i40e_client *client; + struct iavf_info lan_info; + struct iavf_client *client; unsigned long state; }; -struct i40e_client { +struct iavf_client { struct list_head list; /* list of registered clients */ char name[IAVF_CLIENT_STR_LENGTH]; - struct i40e_client_version version; + struct iavf_client_version version; unsigned long state; /* client state */ atomic_t ref_cnt; /* Count of all the client devices of this kind */ u32 flags; -#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) -#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) +#define IAVF_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) +#define IAVF_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) u8 type; -#define I40E_CLIENT_IWARP 0 - struct i40e_client_ops *ops; /* client ops provided by the client */ +#define IAVF_CLIENT_IWARP 0 + struct iavf_client_ops *ops; /* client ops provided by the client */ }; /* used by clients */ -int iavf_register_client(struct i40e_client *client); -int iavf_unregister_client(struct i40e_client *client); +int iavf_register_client(struct iavf_client *client); +int iavf_unregister_client(struct iavf_client *client); #endif /* _IAVF_CLIENT_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c index 768369c89e77..8547fc8fdfd6 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_common.c +++ b/drivers/net/ethernet/intel/iavf/iavf_common.c @@ -2,7 +2,7 @@ /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "iavf_type.h" -#include "i40e_adminq.h" +#include "iavf_adminq.h" #include "iavf_prototype.h" #include <linux/avf/virtchnl.h> @@ -13,9 +13,9 @@ * This function sets the mac type of the adapter based on the * vendor ID and device ID stored in the hw structure. 
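iavf_set_mac_type(), whose kdoc closes just below, is a straight dispatch on the PCI IDs cached in the hw struct: anything not from Intel falls out as IAVF_ERR_DEVICE_NOT_SUPPORTED, and the device ID selects the MAC type. A compilable sketch of that pattern; the demo_* names and the 0x1889 Adaptive VF device ID are illustrative stand-ins rather than the driver's own tables:

#define DEMO_PCI_VENDOR_ID_INTEL	0x8086
#define DEMO_DEV_ID_ADAPTIVE_VF		0x1889	/* illustrative VF device id */

enum demo_mac_type { DEMO_MAC_UNKNOWN = 0, DEMO_MAC_VF };

/* vendor/device come from PCI config space, cached in the hw struct */
static enum demo_mac_type demo_set_mac_type(unsigned short vendor,
					    unsigned short device)
{
	if (vendor != DEMO_PCI_VENDOR_ID_INTEL)
		return DEMO_MAC_UNKNOWN;	/* DEVICE_NOT_SUPPORTED path */

	switch (device) {
	case DEMO_DEV_ID_ADAPTIVE_VF:
		return DEMO_MAC_VF;
	default:
		return DEMO_MAC_UNKNOWN;
	}
}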
**/ -iavf_status iavf_set_mac_type(struct iavf_hw *hw) +enum iavf_status iavf_set_mac_type(struct iavf_hw *hw) { - iavf_status status = 0; + enum iavf_status status = 0; if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { switch (hw->device_id) { @@ -32,7 +32,7 @@ iavf_status iavf_set_mac_type(struct iavf_hw *hw) break; } } else { - status = I40E_ERR_DEVICE_NOT_SUPPORTED; + status = IAVF_ERR_DEVICE_NOT_SUPPORTED; } hw_dbg(hw, "found mac: %d, returns: %d\n", hw->mac.type, status); @@ -44,55 +44,55 @@ iavf_status iavf_set_mac_type(struct iavf_hw *hw) * @hw: pointer to the HW structure * @aq_err: the AQ error code to convert **/ -const char *iavf_aq_str(struct iavf_hw *hw, enum i40e_admin_queue_err aq_err) +const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err) { switch (aq_err) { - case I40E_AQ_RC_OK: + case IAVF_AQ_RC_OK: return "OK"; - case I40E_AQ_RC_EPERM: - return "I40E_AQ_RC_EPERM"; - case I40E_AQ_RC_ENOENT: - return "I40E_AQ_RC_ENOENT"; - case I40E_AQ_RC_ESRCH: - return "I40E_AQ_RC_ESRCH"; - case I40E_AQ_RC_EINTR: - return "I40E_AQ_RC_EINTR"; - case I40E_AQ_RC_EIO: - return "I40E_AQ_RC_EIO"; - case I40E_AQ_RC_ENXIO: - return "I40E_AQ_RC_ENXIO"; - case I40E_AQ_RC_E2BIG: - return "I40E_AQ_RC_E2BIG"; - case I40E_AQ_RC_EAGAIN: - return "I40E_AQ_RC_EAGAIN"; - case I40E_AQ_RC_ENOMEM: - return "I40E_AQ_RC_ENOMEM"; - case I40E_AQ_RC_EACCES: - return "I40E_AQ_RC_EACCES"; - case I40E_AQ_RC_EFAULT: - return "I40E_AQ_RC_EFAULT"; - case I40E_AQ_RC_EBUSY: - return "I40E_AQ_RC_EBUSY"; - case I40E_AQ_RC_EEXIST: - return "I40E_AQ_RC_EEXIST"; - case I40E_AQ_RC_EINVAL: - return "I40E_AQ_RC_EINVAL"; - case I40E_AQ_RC_ENOTTY: - return "I40E_AQ_RC_ENOTTY"; - case I40E_AQ_RC_ENOSPC: - return "I40E_AQ_RC_ENOSPC"; - case I40E_AQ_RC_ENOSYS: - return "I40E_AQ_RC_ENOSYS"; - case I40E_AQ_RC_ERANGE: - return "I40E_AQ_RC_ERANGE"; - case I40E_AQ_RC_EFLUSHED: - return "I40E_AQ_RC_EFLUSHED"; - case I40E_AQ_RC_BAD_ADDR: - return "I40E_AQ_RC_BAD_ADDR"; - case I40E_AQ_RC_EMODE: - return "I40E_AQ_RC_EMODE"; - case I40E_AQ_RC_EFBIG: - return "I40E_AQ_RC_EFBIG"; + case IAVF_AQ_RC_EPERM: + return "IAVF_AQ_RC_EPERM"; + case IAVF_AQ_RC_ENOENT: + return "IAVF_AQ_RC_ENOENT"; + case IAVF_AQ_RC_ESRCH: + return "IAVF_AQ_RC_ESRCH"; + case IAVF_AQ_RC_EINTR: + return "IAVF_AQ_RC_EINTR"; + case IAVF_AQ_RC_EIO: + return "IAVF_AQ_RC_EIO"; + case IAVF_AQ_RC_ENXIO: + return "IAVF_AQ_RC_ENXIO"; + case IAVF_AQ_RC_E2BIG: + return "IAVF_AQ_RC_E2BIG"; + case IAVF_AQ_RC_EAGAIN: + return "IAVF_AQ_RC_EAGAIN"; + case IAVF_AQ_RC_ENOMEM: + return "IAVF_AQ_RC_ENOMEM"; + case IAVF_AQ_RC_EACCES: + return "IAVF_AQ_RC_EACCES"; + case IAVF_AQ_RC_EFAULT: + return "IAVF_AQ_RC_EFAULT"; + case IAVF_AQ_RC_EBUSY: + return "IAVF_AQ_RC_EBUSY"; + case IAVF_AQ_RC_EEXIST: + return "IAVF_AQ_RC_EEXIST"; + case IAVF_AQ_RC_EINVAL: + return "IAVF_AQ_RC_EINVAL"; + case IAVF_AQ_RC_ENOTTY: + return "IAVF_AQ_RC_ENOTTY"; + case IAVF_AQ_RC_ENOSPC: + return "IAVF_AQ_RC_ENOSPC"; + case IAVF_AQ_RC_ENOSYS: + return "IAVF_AQ_RC_ENOSYS"; + case IAVF_AQ_RC_ERANGE: + return "IAVF_AQ_RC_ERANGE"; + case IAVF_AQ_RC_EFLUSHED: + return "IAVF_AQ_RC_EFLUSHED"; + case IAVF_AQ_RC_BAD_ADDR: + return "IAVF_AQ_RC_BAD_ADDR"; + case IAVF_AQ_RC_EMODE: + return "IAVF_AQ_RC_EMODE"; + case IAVF_AQ_RC_EFBIG: + return "IAVF_AQ_RC_EFBIG"; } snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); @@ -104,143 +104,143 @@ const char *iavf_aq_str(struct iavf_hw *hw, enum i40e_admin_queue_err aq_err) * @hw: pointer to the HW structure * @stat_err: the status error code to convert **/ -const 
char *iavf_stat_str(struct iavf_hw *hw, iavf_status stat_err) +const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err) { switch (stat_err) { case 0: return "OK"; - case I40E_ERR_NVM: - return "I40E_ERR_NVM"; - case I40E_ERR_NVM_CHECKSUM: - return "I40E_ERR_NVM_CHECKSUM"; - case I40E_ERR_PHY: - return "I40E_ERR_PHY"; - case I40E_ERR_CONFIG: - return "I40E_ERR_CONFIG"; - case I40E_ERR_PARAM: - return "I40E_ERR_PARAM"; - case I40E_ERR_MAC_TYPE: - return "I40E_ERR_MAC_TYPE"; - case I40E_ERR_UNKNOWN_PHY: - return "I40E_ERR_UNKNOWN_PHY"; - case I40E_ERR_LINK_SETUP: - return "I40E_ERR_LINK_SETUP"; - case I40E_ERR_ADAPTER_STOPPED: - return "I40E_ERR_ADAPTER_STOPPED"; - case I40E_ERR_INVALID_MAC_ADDR: - return "I40E_ERR_INVALID_MAC_ADDR"; - case I40E_ERR_DEVICE_NOT_SUPPORTED: - return "I40E_ERR_DEVICE_NOT_SUPPORTED"; - case I40E_ERR_MASTER_REQUESTS_PENDING: - return "I40E_ERR_MASTER_REQUESTS_PENDING"; - case I40E_ERR_INVALID_LINK_SETTINGS: - return "I40E_ERR_INVALID_LINK_SETTINGS"; - case I40E_ERR_AUTONEG_NOT_COMPLETE: - return "I40E_ERR_AUTONEG_NOT_COMPLETE"; - case I40E_ERR_RESET_FAILED: - return "I40E_ERR_RESET_FAILED"; - case I40E_ERR_SWFW_SYNC: - return "I40E_ERR_SWFW_SYNC"; - case I40E_ERR_NO_AVAILABLE_VSI: - return "I40E_ERR_NO_AVAILABLE_VSI"; - case I40E_ERR_NO_MEMORY: - return "I40E_ERR_NO_MEMORY"; - case I40E_ERR_BAD_PTR: - return "I40E_ERR_BAD_PTR"; - case I40E_ERR_RING_FULL: - return "I40E_ERR_RING_FULL"; - case I40E_ERR_INVALID_PD_ID: - return "I40E_ERR_INVALID_PD_ID"; - case I40E_ERR_INVALID_QP_ID: - return "I40E_ERR_INVALID_QP_ID"; - case I40E_ERR_INVALID_CQ_ID: - return "I40E_ERR_INVALID_CQ_ID"; - case I40E_ERR_INVALID_CEQ_ID: - return "I40E_ERR_INVALID_CEQ_ID"; - case I40E_ERR_INVALID_AEQ_ID: - return "I40E_ERR_INVALID_AEQ_ID"; - case I40E_ERR_INVALID_SIZE: - return "I40E_ERR_INVALID_SIZE"; - case I40E_ERR_INVALID_ARP_INDEX: - return "I40E_ERR_INVALID_ARP_INDEX"; - case I40E_ERR_INVALID_FPM_FUNC_ID: - return "I40E_ERR_INVALID_FPM_FUNC_ID"; - case I40E_ERR_QP_INVALID_MSG_SIZE: - return "I40E_ERR_QP_INVALID_MSG_SIZE"; - case I40E_ERR_QP_TOOMANY_WRS_POSTED: - return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; - case I40E_ERR_INVALID_FRAG_COUNT: - return "I40E_ERR_INVALID_FRAG_COUNT"; - case I40E_ERR_QUEUE_EMPTY: - return "I40E_ERR_QUEUE_EMPTY"; - case I40E_ERR_INVALID_ALIGNMENT: - return "I40E_ERR_INVALID_ALIGNMENT"; - case I40E_ERR_FLUSHED_QUEUE: - return "I40E_ERR_FLUSHED_QUEUE"; - case I40E_ERR_INVALID_PUSH_PAGE_INDEX: - return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; - case I40E_ERR_INVALID_IMM_DATA_SIZE: - return "I40E_ERR_INVALID_IMM_DATA_SIZE"; - case I40E_ERR_TIMEOUT: - return "I40E_ERR_TIMEOUT"; - case I40E_ERR_OPCODE_MISMATCH: - return "I40E_ERR_OPCODE_MISMATCH"; - case I40E_ERR_CQP_COMPL_ERROR: - return "I40E_ERR_CQP_COMPL_ERROR"; - case I40E_ERR_INVALID_VF_ID: - return "I40E_ERR_INVALID_VF_ID"; - case I40E_ERR_INVALID_HMCFN_ID: - return "I40E_ERR_INVALID_HMCFN_ID"; - case I40E_ERR_BACKING_PAGE_ERROR: - return "I40E_ERR_BACKING_PAGE_ERROR"; - case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: - return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; - case I40E_ERR_INVALID_PBLE_INDEX: - return "I40E_ERR_INVALID_PBLE_INDEX"; - case I40E_ERR_INVALID_SD_INDEX: - return "I40E_ERR_INVALID_SD_INDEX"; - case I40E_ERR_INVALID_PAGE_DESC_INDEX: - return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; - case I40E_ERR_INVALID_SD_TYPE: - return "I40E_ERR_INVALID_SD_TYPE"; - case I40E_ERR_MEMCPY_FAILED: - return "I40E_ERR_MEMCPY_FAILED"; - case I40E_ERR_INVALID_HMC_OBJ_INDEX: - return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; - 
case I40E_ERR_INVALID_HMC_OBJ_COUNT: - return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; - case I40E_ERR_INVALID_SRQ_ARM_LIMIT: - return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; - case I40E_ERR_SRQ_ENABLED: - return "I40E_ERR_SRQ_ENABLED"; - case I40E_ERR_ADMIN_QUEUE_ERROR: - return "I40E_ERR_ADMIN_QUEUE_ERROR"; - case I40E_ERR_ADMIN_QUEUE_TIMEOUT: - return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; - case I40E_ERR_BUF_TOO_SHORT: - return "I40E_ERR_BUF_TOO_SHORT"; - case I40E_ERR_ADMIN_QUEUE_FULL: - return "I40E_ERR_ADMIN_QUEUE_FULL"; - case I40E_ERR_ADMIN_QUEUE_NO_WORK: - return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; - case I40E_ERR_BAD_IWARP_CQE: - return "I40E_ERR_BAD_IWARP_CQE"; - case I40E_ERR_NVM_BLANK_MODE: - return "I40E_ERR_NVM_BLANK_MODE"; - case I40E_ERR_NOT_IMPLEMENTED: - return "I40E_ERR_NOT_IMPLEMENTED"; - case I40E_ERR_PE_DOORBELL_NOT_ENABLED: - return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; - case I40E_ERR_DIAG_TEST_FAILED: - return "I40E_ERR_DIAG_TEST_FAILED"; - case I40E_ERR_NOT_READY: - return "I40E_ERR_NOT_READY"; - case I40E_NOT_SUPPORTED: - return "I40E_NOT_SUPPORTED"; - case I40E_ERR_FIRMWARE_API_VERSION: - return "I40E_ERR_FIRMWARE_API_VERSION"; - case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR: - return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; + case IAVF_ERR_NVM: + return "IAVF_ERR_NVM"; + case IAVF_ERR_NVM_CHECKSUM: + return "IAVF_ERR_NVM_CHECKSUM"; + case IAVF_ERR_PHY: + return "IAVF_ERR_PHY"; + case IAVF_ERR_CONFIG: + return "IAVF_ERR_CONFIG"; + case IAVF_ERR_PARAM: + return "IAVF_ERR_PARAM"; + case IAVF_ERR_MAC_TYPE: + return "IAVF_ERR_MAC_TYPE"; + case IAVF_ERR_UNKNOWN_PHY: + return "IAVF_ERR_UNKNOWN_PHY"; + case IAVF_ERR_LINK_SETUP: + return "IAVF_ERR_LINK_SETUP"; + case IAVF_ERR_ADAPTER_STOPPED: + return "IAVF_ERR_ADAPTER_STOPPED"; + case IAVF_ERR_INVALID_MAC_ADDR: + return "IAVF_ERR_INVALID_MAC_ADDR"; + case IAVF_ERR_DEVICE_NOT_SUPPORTED: + return "IAVF_ERR_DEVICE_NOT_SUPPORTED"; + case IAVF_ERR_MASTER_REQUESTS_PENDING: + return "IAVF_ERR_MASTER_REQUESTS_PENDING"; + case IAVF_ERR_INVALID_LINK_SETTINGS: + return "IAVF_ERR_INVALID_LINK_SETTINGS"; + case IAVF_ERR_AUTONEG_NOT_COMPLETE: + return "IAVF_ERR_AUTONEG_NOT_COMPLETE"; + case IAVF_ERR_RESET_FAILED: + return "IAVF_ERR_RESET_FAILED"; + case IAVF_ERR_SWFW_SYNC: + return "IAVF_ERR_SWFW_SYNC"; + case IAVF_ERR_NO_AVAILABLE_VSI: + return "IAVF_ERR_NO_AVAILABLE_VSI"; + case IAVF_ERR_NO_MEMORY: + return "IAVF_ERR_NO_MEMORY"; + case IAVF_ERR_BAD_PTR: + return "IAVF_ERR_BAD_PTR"; + case IAVF_ERR_RING_FULL: + return "IAVF_ERR_RING_FULL"; + case IAVF_ERR_INVALID_PD_ID: + return "IAVF_ERR_INVALID_PD_ID"; + case IAVF_ERR_INVALID_QP_ID: + return "IAVF_ERR_INVALID_QP_ID"; + case IAVF_ERR_INVALID_CQ_ID: + return "IAVF_ERR_INVALID_CQ_ID"; + case IAVF_ERR_INVALID_CEQ_ID: + return "IAVF_ERR_INVALID_CEQ_ID"; + case IAVF_ERR_INVALID_AEQ_ID: + return "IAVF_ERR_INVALID_AEQ_ID"; + case IAVF_ERR_INVALID_SIZE: + return "IAVF_ERR_INVALID_SIZE"; + case IAVF_ERR_INVALID_ARP_INDEX: + return "IAVF_ERR_INVALID_ARP_INDEX"; + case IAVF_ERR_INVALID_FPM_FUNC_ID: + return "IAVF_ERR_INVALID_FPM_FUNC_ID"; + case IAVF_ERR_QP_INVALID_MSG_SIZE: + return "IAVF_ERR_QP_INVALID_MSG_SIZE"; + case IAVF_ERR_QP_TOOMANY_WRS_POSTED: + return "IAVF_ERR_QP_TOOMANY_WRS_POSTED"; + case IAVF_ERR_INVALID_FRAG_COUNT: + return "IAVF_ERR_INVALID_FRAG_COUNT"; + case IAVF_ERR_QUEUE_EMPTY: + return "IAVF_ERR_QUEUE_EMPTY"; + case IAVF_ERR_INVALID_ALIGNMENT: + return "IAVF_ERR_INVALID_ALIGNMENT"; + case IAVF_ERR_FLUSHED_QUEUE: + return "IAVF_ERR_FLUSHED_QUEUE"; + case IAVF_ERR_INVALID_PUSH_PAGE_INDEX: + return 
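iavf_aq_queue_shutdown(), which this kdoc introduces and whose body follows, is a typical "direct" command: no external buffer, just a 16-byte command struct overlaid on the descriptor's params.raw, with the SI flag set by iavf_fill_default_direct_cmd_desc(). A minimal sketch of that construction, including a divide-by-zero size assertion in the style of IAVF_CHECK_CMD_LENGTH(); the demo_* names are stand-ins and byte-order handling (cpu_to_le16/32) is omitted:

#include <stdint.h>
#include <string.h>

struct demo_aq_desc {
	uint16_t flags, opcode, datalen, retval;
	uint32_t cookie_high, cookie_low;
	uint8_t  raw[16];	/* direct commands overlay these 16 bytes */
};

#define DEMO_AQ_FLAG_SI			(1u << 13)	/* mirrors IAVF_AQ_FLAG_SI */
#define DEMO_AQC_OPC_QUEUE_SHUTDOWN	0x0003
#define DEMO_AQ_DRIVER_UNLOADING	0x1

struct demo_aqc_queue_shutdown {
	uint32_t driver_unloading;
	uint8_t  reserved[12];
};

/* compile-time size check in the style of IAVF_CHECK_CMD_LENGTH() */
enum { demo_assert_shutdown_len =
	16 / ((sizeof(struct demo_aqc_queue_shutdown) == 16) ? 1 : 0) };

static void demo_build_queue_shutdown(struct demo_aq_desc *desc, int unloading)
{
	struct demo_aqc_queue_shutdown *cmd =
		(struct demo_aqc_queue_shutdown *)desc->raw;

	memset(desc, 0, sizeof(*desc));	/* fill_default_direct_cmd_desc */
	desc->opcode = DEMO_AQC_OPC_QUEUE_SHUTDOWN;
	desc->flags = DEMO_AQ_FLAG_SI;
	if (unloading)
		cmd->driver_unloading = DEMO_AQ_DRIVER_UNLOADING;
}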
"IAVF_ERR_INVALID_PUSH_PAGE_INDEX"; + case IAVF_ERR_INVALID_IMM_DATA_SIZE: + return "IAVF_ERR_INVALID_IMM_DATA_SIZE"; + case IAVF_ERR_TIMEOUT: + return "IAVF_ERR_TIMEOUT"; + case IAVF_ERR_OPCODE_MISMATCH: + return "IAVF_ERR_OPCODE_MISMATCH"; + case IAVF_ERR_CQP_COMPL_ERROR: + return "IAVF_ERR_CQP_COMPL_ERROR"; + case IAVF_ERR_INVALID_VF_ID: + return "IAVF_ERR_INVALID_VF_ID"; + case IAVF_ERR_INVALID_HMCFN_ID: + return "IAVF_ERR_INVALID_HMCFN_ID"; + case IAVF_ERR_BACKING_PAGE_ERROR: + return "IAVF_ERR_BACKING_PAGE_ERROR"; + case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE: + return "IAVF_ERR_NO_PBLCHUNKS_AVAILABLE"; + case IAVF_ERR_INVALID_PBLE_INDEX: + return "IAVF_ERR_INVALID_PBLE_INDEX"; + case IAVF_ERR_INVALID_SD_INDEX: + return "IAVF_ERR_INVALID_SD_INDEX"; + case IAVF_ERR_INVALID_PAGE_DESC_INDEX: + return "IAVF_ERR_INVALID_PAGE_DESC_INDEX"; + case IAVF_ERR_INVALID_SD_TYPE: + return "IAVF_ERR_INVALID_SD_TYPE"; + case IAVF_ERR_MEMCPY_FAILED: + return "IAVF_ERR_MEMCPY_FAILED"; + case IAVF_ERR_INVALID_HMC_OBJ_INDEX: + return "IAVF_ERR_INVALID_HMC_OBJ_INDEX"; + case IAVF_ERR_INVALID_HMC_OBJ_COUNT: + return "IAVF_ERR_INVALID_HMC_OBJ_COUNT"; + case IAVF_ERR_INVALID_SRQ_ARM_LIMIT: + return "IAVF_ERR_INVALID_SRQ_ARM_LIMIT"; + case IAVF_ERR_SRQ_ENABLED: + return "IAVF_ERR_SRQ_ENABLED"; + case IAVF_ERR_ADMIN_QUEUE_ERROR: + return "IAVF_ERR_ADMIN_QUEUE_ERROR"; + case IAVF_ERR_ADMIN_QUEUE_TIMEOUT: + return "IAVF_ERR_ADMIN_QUEUE_TIMEOUT"; + case IAVF_ERR_BUF_TOO_SHORT: + return "IAVF_ERR_BUF_TOO_SHORT"; + case IAVF_ERR_ADMIN_QUEUE_FULL: + return "IAVF_ERR_ADMIN_QUEUE_FULL"; + case IAVF_ERR_ADMIN_QUEUE_NO_WORK: + return "IAVF_ERR_ADMIN_QUEUE_NO_WORK"; + case IAVF_ERR_BAD_IWARP_CQE: + return "IAVF_ERR_BAD_IWARP_CQE"; + case IAVF_ERR_NVM_BLANK_MODE: + return "IAVF_ERR_NVM_BLANK_MODE"; + case IAVF_ERR_NOT_IMPLEMENTED: + return "IAVF_ERR_NOT_IMPLEMENTED"; + case IAVF_ERR_PE_DOORBELL_NOT_ENABLED: + return "IAVF_ERR_PE_DOORBELL_NOT_ENABLED"; + case IAVF_ERR_DIAG_TEST_FAILED: + return "IAVF_ERR_DIAG_TEST_FAILED"; + case IAVF_ERR_NOT_READY: + return "IAVF_ERR_NOT_READY"; + case IAVF_NOT_SUPPORTED: + return "IAVF_NOT_SUPPORTED"; + case IAVF_ERR_FIRMWARE_API_VERSION: + return "IAVF_ERR_FIRMWARE_API_VERSION"; + case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR: + return "IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; } snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); @@ -260,7 +260,7 @@ const char *iavf_stat_str(struct iavf_hw *hw, iavf_status stat_err) void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc, void *buffer, u16 buf_len) { - struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; + struct iavf_aq_desc *aq_desc = (struct iavf_aq_desc *)desc; u8 *buf = (u8 *)buffer; if ((!(mask & hw->debug_mask)) || !desc) @@ -327,17 +327,17 @@ bool iavf_check_asq_alive(struct iavf_hw *hw) * Tell the Firmware that we're shutting down the AdminQ and whether * or not the driver is unloading as well. 
**/ -iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading) +enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading) { - struct i40e_aq_desc desc; - struct i40e_aqc_queue_shutdown *cmd = - (struct i40e_aqc_queue_shutdown *)&desc.params.raw; - iavf_status status; + struct iavf_aq_desc desc; + struct iavf_aqc_queue_shutdown *cmd = + (struct iavf_aqc_queue_shutdown *)&desc.params.raw; + enum iavf_status status; - iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown); + iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown); if (unloading) - cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); + cmd->driver_unloading = cpu_to_le32(IAVF_AQ_DRIVER_UNLOADING); status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL); return status; @@ -354,43 +354,43 @@ iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading) * * Internal function to get or set RSS look up table **/ -static iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw, - u16 vsi_id, bool pf_lut, - u8 *lut, u16 lut_size, - bool set) +static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw, + u16 vsi_id, bool pf_lut, + u8 *lut, u16 lut_size, + bool set) { - iavf_status status; - struct i40e_aq_desc desc; - struct i40e_aqc_get_set_rss_lut *cmd_resp = - (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; + enum iavf_status status; + struct iavf_aq_desc desc; + struct iavf_aqc_get_set_rss_lut *cmd_resp = + (struct iavf_aqc_get_set_rss_lut *)&desc.params.raw; if (set) iavf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_rss_lut); + iavf_aqc_opc_set_rss_lut); else iavf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_get_rss_lut); + iavf_aqc_opc_get_rss_lut); /* Indirect command */ - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD); cmd_resp->vsi_id = cpu_to_le16((u16)((vsi_id << - I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & - I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); - cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); + IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & + IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)IAVF_AQC_SET_RSS_LUT_VSI_VALID); if (pf_lut) cmd_resp->flags |= cpu_to_le16((u16) - ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF << + IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); else cmd_resp->flags |= cpu_to_le16((u16) - ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << + IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL); @@ -407,8 +407,8 @@ static iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw, * * get the RSS lookup table, PF or VSI type **/ -iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id, - bool pf_lut, u8 *lut, u16 lut_size) +enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) { return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, false); @@ -424,8 +424,8 @@ iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id, * * set the RSS lookup table, PF or VSI type **/ -iavf_status 
iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id, - bool pf_lut, u8 *lut, u16 lut_size) +enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) { return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); } @@ -439,33 +439,33 @@ iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id, * * get the RSS key per VSI **/ -static +static enum iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key, + struct iavf_aqc_get_set_rss_key_data *key, bool set) { - iavf_status status; - struct i40e_aq_desc desc; - struct i40e_aqc_get_set_rss_key *cmd_resp = - (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; - u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); + enum iavf_status status; + struct iavf_aq_desc desc; + struct iavf_aqc_get_set_rss_key *cmd_resp = + (struct iavf_aqc_get_set_rss_key *)&desc.params.raw; + u16 key_size = sizeof(struct iavf_aqc_get_set_rss_key_data); if (set) iavf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_rss_key); + iavf_aqc_opc_set_rss_key); else iavf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_get_rss_key); + iavf_aqc_opc_get_rss_key); /* Indirect command */ - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD); cmd_resp->vsi_id = cpu_to_le16((u16)((vsi_id << - I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & - I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); - cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); + IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & + IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)IAVF_AQC_SET_RSS_KEY_VSI_VALID); status = iavf_asq_send_command(hw, &desc, key, key_size, NULL); @@ -479,8 +479,8 @@ iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id, * @key: pointer to key info struct * **/ -iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key) +enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id, + struct iavf_aqc_get_set_rss_key_data *key) { return iavf_aq_get_set_rss_key(hw, vsi_id, key, false); } @@ -493,8 +493,8 @@ iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id, * * set the RSS key per VSI **/ -iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key) +enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id, + struct iavf_aqc_get_set_rss_key_data *key) { return iavf_aq_get_set_rss_key(hw, vsi_id, key, true); } @@ -515,7 +515,7 @@ iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id, * IF NOT iavf_ptype_lookup[ptype].known * THEN * Packet is unknown - * ELSE IF iavf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP + * ELSE IF iavf_ptype_lookup[ptype].outer_ip == IAVF_RX_PTYPE_OUTER_IP * Use the rest of the fields to look at the tunnels, inner protocols, etc * ELSE * Use the enum iavf_rx_l2_ptype to decode the packet type @@ -877,24 +877,25 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = { * is sent asynchronously, i.e. iavf_asq_send_command() does not wait for * completion before returning. 
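The comment here states the key contract of the send-to-PF path: the message is fire-and-forget, with the virtchnl opcode and return value carried in the descriptor's two cookie words and the PF's reply arriving later on the ARQ. A hedged sketch of that cookie packing follows; struct demo_aq_desc is a stand-in for the real struct iavf_aq_desc and omits flags, datalen, and endian conversion.

#include <stdint.h>

/* Stand-in for the two cookie words of an admin-queue descriptor. */
struct demo_aq_desc {
	uint32_t cookie_high;	/* carries the virtchnl opcode */
	uint32_t cookie_low;	/* carries the status/return value */
};

static void demo_fill_msg_desc(struct demo_aq_desc *desc,
			       uint32_t v_opcode, int32_t v_retval)
{
	/* The real driver stores these little-endian via cpu_to_le32(). */
	desc->cookie_high = v_opcode;
	desc->cookie_low = (uint32_t)v_retval;
	/* ...post the descriptor and return without waiting for the PF... */
}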
**/ -iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw, - enum virtchnl_ops v_opcode, - iavf_status v_retval, u8 *msg, u16 msglen, - struct i40e_asq_cmd_details *cmd_details) +enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw, + enum virtchnl_ops v_opcode, + enum iavf_status v_retval, + u8 *msg, u16 msglen, + struct iavf_asq_cmd_details *cmd_details) { - struct i40e_asq_cmd_details details; - struct i40e_aq_desc desc; - iavf_status status; + struct iavf_asq_cmd_details details; + struct iavf_aq_desc desc; + enum iavf_status status; - iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); + iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_send_msg_to_pf); + desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_SI); desc.cookie_high = cpu_to_le32(v_opcode); desc.cookie_low = cpu_to_le32(v_retval); if (msglen) { - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF - | I40E_AQ_FLAG_RD)); - if (msglen > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.flags |= cpu_to_le16((u16)(IAVF_AQ_FLAG_BUF + | IAVF_AQ_FLAG_RD)); + if (msglen > IAVF_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_LB); desc.datalen = cpu_to_le16(msglen); } if (!cmd_details) { @@ -948,7 +949,7 @@ void iavf_vf_parse_hw_config(struct iavf_hw *hw, * as none will be forthcoming. Immediately after calling this function, * the admin queue should be shut down and (optionally) reinitialized. **/ -iavf_status iavf_vf_reset(struct iavf_hw *hw) +enum iavf_status iavf_vf_reset(struct iavf_hw *hw) { return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF, 0, NULL, 0, NULL); diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 9f87304109fe..dad3eec8ccd8 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -280,10 +280,10 @@ static int iavf_get_link_ksettings(struct net_device *netdev, cmd->base.port = PORT_NONE; /* Set speed and duplex */ switch (adapter->link_speed) { - case I40E_LINK_SPEED_40GB: + case IAVF_LINK_SPEED_40GB: cmd->base.speed = SPEED_40000; break; - case I40E_LINK_SPEED_25GB: + case IAVF_LINK_SPEED_25GB: #ifdef SPEED_25000 cmd->base.speed = SPEED_25000; #else @@ -291,16 +291,16 @@ static int iavf_get_link_ksettings(struct net_device *netdev, "Speed is 25G, display not supported by this version of ethtool.\n"); #endif break; - case I40E_LINK_SPEED_20GB: + case IAVF_LINK_SPEED_20GB: cmd->base.speed = SPEED_20000; break; - case I40E_LINK_SPEED_10GB: + case IAVF_LINK_SPEED_10GB: cmd->base.speed = SPEED_10000; break; - case I40E_LINK_SPEED_1GB: + case IAVF_LINK_SPEED_1GB: cmd->base.speed = SPEED_1000; break; - case I40E_LINK_SPEED_100MB: + case IAVF_LINK_SPEED_100MB: cmd->base.speed = SPEED_100; break; default: @@ -510,7 +510,7 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags) if (changed_flags & IAVF_FLAG_LEGACY_RX) { if (netif_running(netdev)) { adapter->flags |= IAVF_FLAG_RESET_NEEDED; - schedule_work(&adapter->reset_task); + queue_work(iavf_wq, &adapter->reset_task); } } @@ -622,7 +622,7 @@ static int iavf_set_ringparam(struct net_device *netdev, if (netif_running(netdev)) { adapter->flags |= IAVF_FLAG_RESET_NEEDED; - schedule_work(&adapter->reset_task); + queue_work(iavf_wq, &adapter->reset_task); } return 0; diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 4569d69a2b55..9d2b50964a08 100644 --- 
a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -14,6 +14,8 @@ static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter); static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter); static int iavf_close(struct net_device *netdev); +static int iavf_init_get_resources(struct iavf_adapter *adapter); +static int iavf_check_reset_complete(struct iavf_hw *hw); char iavf_driver_name[] = "iavf"; static const char iavf_driver_string[] = @@ -57,7 +59,8 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver") MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); -static struct workqueue_struct *iavf_wq; +static const struct net_device_ops iavf_netdev_ops; +struct workqueue_struct *iavf_wq; /** * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code @@ -66,14 +69,14 @@ static struct workqueue_struct *iavf_wq; * @size: size of memory requested * @alignment: what to align the allocation to **/ -iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, - struct iavf_dma_mem *mem, - u64 size, u32 alignment) +enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, + struct iavf_dma_mem *mem, + u64 size, u32 alignment) { struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; if (!mem) - return I40E_ERR_PARAM; + return IAVF_ERR_PARAM; mem->size = ALIGN(size, alignment); mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, @@ -81,7 +84,7 @@ iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, if (mem->va) return 0; else - return I40E_ERR_NO_MEMORY; + return IAVF_ERR_NO_MEMORY; } /** @@ -89,12 +92,13 @@ iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ -iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem) +enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, + struct iavf_dma_mem *mem) { struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; if (!mem || !mem->va) - return I40E_ERR_PARAM; + return IAVF_ERR_PARAM; dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, (dma_addr_t)mem->pa); return 0; @@ -106,11 +110,11 @@ iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem) * @mem: ptr to mem struct to fill out * @size: size of memory requested **/ -iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw, - struct iavf_virt_mem *mem, u32 size) +enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw, + struct iavf_virt_mem *mem, u32 size) { if (!mem) - return I40E_ERR_PARAM; + return IAVF_ERR_PARAM; mem->size = size; mem->va = kzalloc(size, GFP_KERNEL); @@ -118,7 +122,7 @@ iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw, if (mem->va) return 0; else - return I40E_ERR_NO_MEMORY; + return IAVF_ERR_NO_MEMORY; } /** @@ -126,10 +130,11 @@ iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw, * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ -iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, struct iavf_virt_mem *mem) +enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, + struct iavf_virt_mem *mem) { if (!mem) - return I40E_ERR_PARAM; + return IAVF_ERR_PARAM; /* it's ok to kfree a NULL pointer */ kfree(mem->va); @@ -168,7 +173,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter) if (!(adapter->flags & (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) { adapter->flags |= IAVF_FLAG_RESET_NEEDED; - schedule_work(&adapter->reset_task); + queue_work(iavf_wq, &adapter->reset_task); } } @@ 
-287,7 +292,7 @@ static irqreturn_t iavf_msix_aq(int irq, void *data) rd32(hw, IAVF_VFINT_ICR0_ENA1); /* schedule work on the private workqueue */ - schedule_work(&adapter->adminq_task); + queue_work(iavf_wq, &adapter->adminq_task); return IRQ_HANDLED; } @@ -657,14 +662,13 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan) f = iavf_find_vlan(adapter, vlan); if (!f) { - f = kzalloc(sizeof(*f), GFP_KERNEL); + f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) goto clearout; f->vlan = vlan; - INIT_LIST_HEAD(&f->list); - list_add(&f->list, &adapter->vlan_filter_list); + list_add_tail(&f->list, &adapter->vlan_filter_list); f->add = true; adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; } @@ -979,7 +983,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter) adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; if (CLIENT_ENABLED(adapter)) adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; - mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); } /** @@ -1043,7 +1047,7 @@ void iavf_down(struct iavf_adapter *adapter) adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; } - mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); } /** @@ -1227,8 +1231,8 @@ out: **/ static int iavf_config_rss_aq(struct iavf_adapter *adapter) { - struct i40e_aqc_get_set_rss_key_data *rss_key = - (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key; + struct iavf_aqc_get_set_rss_key_data *rss_key = + (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key; struct iavf_hw *hw = &adapter->hw; int ret = 0; @@ -1532,136 +1536,66 @@ err: } /** - * iavf_watchdog_timer - Periodic call-back timer - * @data: pointer to adapter disguised as unsigned long - **/ -static void iavf_watchdog_timer(struct timer_list *t) -{ - struct iavf_adapter *adapter = from_timer(adapter, t, - watchdog_timer); - - schedule_work(&adapter->watchdog_task); - /* timer will be rescheduled in watchdog task */ -} - -/** - * iavf_watchdog_task - Periodic call-back task - * @work: pointer to work_struct + * iavf_process_aq_command - process aq_required flags + * and sends aq command + * @adapter: pointer to iavf adapter structure + * + * Returns 0 on success + * Returns error code if no command was sent + * or error code if the command failed. **/ -static void iavf_watchdog_task(struct work_struct *work) +static int iavf_process_aq_command(struct iavf_adapter *adapter) { - struct iavf_adapter *adapter = container_of(work, - struct iavf_adapter, - watchdog_task); - struct iavf_hw *hw = &adapter->hw; - u32 reg_val; - - if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) - goto restart_watchdog; - - if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { - reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & - IAVF_VFGEN_RSTAT_VFR_STATE_MASK; - if ((reg_val == VIRTCHNL_VFR_VFACTIVE) || - (reg_val == VIRTCHNL_VFR_COMPLETED)) { - /* A chance for redemption! */ - dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); - adapter->state = __IAVF_STARTUP; - adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; - schedule_delayed_work(&adapter->init_task, 10); - clear_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section); - /* Don't reschedule the watchdog, since we've restarted - * the init task. When init_task contacts the PF and - * gets everything set up again, it'll restart the - * watchdog for us. Down, boy. Sit. Stay. Woof. 
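This region converts the old goto-per-command watchdog body into iavf_process_aq_command(): each invocation issues at most one pending admin-queue request and returns 0, while -EAGAIN tells the caller nothing was pending so it can request stats instead. A minimal sketch of that dispatch shape, with made-up flag and function names:

#include <errno.h>

#define DEMO_AQ_DISABLE_QUEUES	0x01u
#define DEMO_AQ_MAP_VECTORS	0x02u

struct demo_adapter {
	unsigned int aq_required;	/* bitmask of pending AQ requests */
};

/* Issue at most one pending request per call; a real implementation
 * clears the flag once the request has actually been sent. */
static int demo_process_aq_command(struct demo_adapter *adapter)
{
	if (adapter->aq_required & DEMO_AQ_DISABLE_QUEUES) {
		/* demo_disable_queues(adapter) would be issued here */
		return 0;
	}
	if (adapter->aq_required & DEMO_AQ_MAP_VECTORS) {
		/* demo_map_queues(adapter) would be issued here */
		return 0;
	}
	return -EAGAIN;		/* nothing pending this pass */
}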
- */ - return; - } - adapter->aq_required = 0; - adapter->current_op = VIRTCHNL_OP_UNKNOWN; - goto watchdog_done; - } - - if ((adapter->state < __IAVF_DOWN) || - (adapter->flags & IAVF_FLAG_RESET_PENDING)) - goto watchdog_done; - - /* check for reset */ - reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; - if (!(adapter->flags & IAVF_FLAG_RESET_PENDING) && !reg_val) { - adapter->state = __IAVF_RESETTING; - adapter->flags |= IAVF_FLAG_RESET_PENDING; - dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); - schedule_work(&adapter->reset_task); - adapter->aq_required = 0; - adapter->current_op = VIRTCHNL_OP_UNKNOWN; - goto watchdog_done; - } - - /* Process admin queue tasks. After init, everything gets done - * here so we don't race on the admin queue. - */ - if (adapter->current_op) { - if (!iavf_asq_done(hw)) { - dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n"); - iavf_send_api_ver(adapter); - } - goto watchdog_done; - } - if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) { - iavf_send_vf_config_msg(adapter); - goto watchdog_done; - } - + if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) + return iavf_send_vf_config_msg(adapter); if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { iavf_disable_queues(adapter); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) { iavf_map_queues(adapter); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) { iavf_add_ether_addrs(adapter); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) { iavf_add_vlans(adapter); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) { iavf_del_ether_addrs(adapter); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) { iavf_del_vlans(adapter); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { iavf_enable_vlan_stripping(adapter); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { iavf_disable_vlan_stripping(adapter); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) { iavf_configure_queues(adapter); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) { iavf_enable_queues(adapter); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) { @@ -1669,81 +1603,414 @@ static void iavf_watchdog_task(struct work_struct *work) * PF, so we don't have to set current_op as we will * not get a response through the ARQ. 
*/ - iavf_init_rss(adapter); adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS; - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) { iavf_get_hena(adapter); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) { iavf_set_hena(adapter); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) { iavf_set_rss_key(adapter); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) { iavf_set_rss_lut(adapter); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) { iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) { iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); - goto watchdog_done; + return 0; } if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) && (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { iavf_set_promiscuous(adapter, 0); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) { iavf_enable_channels(adapter); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) { iavf_disable_channels(adapter); - goto watchdog_done; + return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { iavf_add_cloud_filter(adapter); - goto watchdog_done; + return 0; } if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { iavf_del_cloud_filter(adapter); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { + iavf_del_cloud_filter(adapter); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { + iavf_add_cloud_filter(adapter); + return 0; + } + return -EAGAIN; +} + +/** + * iavf_startup - first step of driver startup + * @adapter: board private structure + * + * Function process __IAVF_STARTUP driver state. + * When success the state is changed to __IAVF_INIT_VERSION_CHECK + * when fails it returns -EAGAIN + **/ +static int iavf_startup(struct iavf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct iavf_hw *hw = &adapter->hw; + int err; + + WARN_ON(adapter->state != __IAVF_STARTUP); + + /* driver loaded, probe complete */ + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; + adapter->flags &= ~IAVF_FLAG_RESET_PENDING; + err = iavf_set_mac_type(hw); + if (err) { + dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err); + goto err; + } + + err = iavf_check_reset_complete(hw); + if (err) { + dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", + err); + goto err; + } + hw->aq.num_arq_entries = IAVF_AQ_LEN; + hw->aq.num_asq_entries = IAVF_AQ_LEN; + hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; + hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; + + err = iavf_init_adminq(hw); + if (err) { + dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err); + goto err; + } + err = iavf_send_api_ver(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); + iavf_shutdown_adminq(hw); + goto err; + } + adapter->state = __IAVF_INIT_VERSION_CHECK; +err: + return err; +} + +/** + * iavf_init_version_check - second step of driver startup + * @adapter: board private structure + * + * Function process __IAVF_INIT_VERSION_CHECK driver state. 
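iavf_startup() above is the first of three helpers that the rewritten init task dispatches on adapter->state; iavf_init_version_check() and iavf_init_get_resources() follow below. Each step either advances the state or returns a negative value so the caller reschedules. A compressed sketch of that dispatch, with illustrative names and the real work elided:

enum demo_state {
	DEMO_STARTUP,
	DEMO_VERSION_CHECK,
	DEMO_GET_RESOURCES,
	DEMO_DOWN,
};

struct demo_vf {
	enum demo_state state;
};

/* Each helper returns 0 and bumps vf->state, or <0 to retry later. */
static int demo_startup(struct demo_vf *vf)       { vf->state = DEMO_VERSION_CHECK; return 0; }
static int demo_version_check(struct demo_vf *vf) { vf->state = DEMO_GET_RESOURCES; return 0; }
static int demo_get_resources(struct demo_vf *vf) { vf->state = DEMO_DOWN; return 0; }

static int demo_init_step(struct demo_vf *vf)
{
	switch (vf->state) {
	case DEMO_STARTUP:	 return demo_startup(vf);
	case DEMO_VERSION_CHECK: return demo_version_check(vf);
	case DEMO_GET_RESOURCES: return demo_get_resources(vf);
	default:		 return 0;	/* init complete */
	}
}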
+ * When success the state is changed to __IAVF_INIT_GET_RESOURCES + * when fails it returns -EAGAIN + **/ +static int iavf_init_version_check(struct iavf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct iavf_hw *hw = &adapter->hw; + int err = -EAGAIN; + + WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK); + + if (!iavf_asq_done(hw)) { + dev_err(&pdev->dev, "Admin queue command never completed\n"); + iavf_shutdown_adminq(hw); + adapter->state = __IAVF_STARTUP; + goto err; + } + + /* aq msg sent, awaiting reply */ + err = iavf_verify_api_ver(adapter); + if (err) { + if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) + err = iavf_send_api_ver(adapter); + else + dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", + adapter->pf_version.major, + adapter->pf_version.minor, + VIRTCHNL_VERSION_MAJOR, + VIRTCHNL_VERSION_MINOR); + goto err; + } + err = iavf_send_vf_config_msg(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to send config request (%d)\n", + err); + goto err; + } + adapter->state = __IAVF_INIT_GET_RESOURCES; + +err: + return err; +} + +/** + * iavf_init_get_resources - third step of driver startup + * @adapter: board private structure + * + * Function process __IAVF_INIT_GET_RESOURCES driver state and + * finishes driver initialization procedure. + * When success the state is changed to __IAVF_DOWN + * when fails it returns -EAGAIN + **/ +static int iavf_init_get_resources(struct iavf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct iavf_hw *hw = &adapter->hw; + int err = 0, bufsz; + + WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); + /* aq msg sent, awaiting reply */ + if (!adapter->vf_res) { + bufsz = sizeof(struct virtchnl_vf_resource) + + (IAVF_MAX_VF_VSI * + sizeof(struct virtchnl_vsi_resource)); + adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); + if (!adapter->vf_res) + goto err; + } + err = iavf_get_vf_config(adapter); + if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { + err = iavf_send_vf_config_msg(adapter); + goto err; + } else if (err == IAVF_ERR_PARAM) { + /* We only get ERR_PARAM if the device is in a very bad + * state or if we've been disabled for previous bad + * behavior. Either way, we're done now. 
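The comment just above captures the error-handling split in iavf_init_get_resources(): IAVF_ERR_ADMIN_QUEUE_NO_WORK only means the PF's reply has not been posted yet, so the config request is resent and the step retried, whereas IAVF_ERR_PARAM is terminal. A sketch of that retryable-versus-terminal distinction, under assumed demo_* names and values mirroring the status enum:

#define DEMO_ERR_NO_WORK  (-57)	/* AQ reply not posted yet */
#define DEMO_ERR_PARAM	   (-5)	/* PF rejected the request outright */

/* Returns <0 so the caller retries later, 0 when there is nothing
 * further to do (success, or a terminal refusal from the PF). */
static int demo_handle_config_reply(int aq_err)
{
	if (aq_err == DEMO_ERR_NO_WORK)
		return -1;	/* resend the request and try again */
	if (aq_err == DEMO_ERR_PARAM)
		return 0;	/* terminal: PF refused us, do not retry */
	return aq_err;		/* other errors propagate and retry */
}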
+ */ + iavf_shutdown_adminq(hw); + dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); + return 0; + } + if (err) { + dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); + goto err_alloc; + } + + if (iavf_process_config(adapter)) + goto err_alloc; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; + + netdev->netdev_ops = &iavf_netdev_ops; + iavf_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + /* MTU range: 68 - 9710 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; + + if (!is_valid_ether_addr(adapter->hw.mac.addr)) { + dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", + adapter->hw.mac.addr); + eth_hw_addr_random(netdev); + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + adapter->flags |= IAVF_FLAG_ADDR_SET_BY_PF; + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); + } + + adapter->tx_desc_count = IAVF_DEFAULT_TXD; + adapter->rx_desc_count = IAVF_DEFAULT_RXD; + err = iavf_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + iavf_map_rings_to_vectors(adapter); + if (adapter->vf_res->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; + + err = iavf_request_misc_irq(adapter); + if (err) + goto err_sw_init; + + netif_carrier_off(netdev); + adapter->link_up = false; + + /* set the semaphore to prevent any callbacks after device registration + * up to time when state of driver will be set to __IAVF_DOWN + */ + rtnl_lock(); + if (!adapter->netdev_registered) { + err = register_netdevice(netdev); + if (err) { + rtnl_unlock(); + goto err_register; + } + } + + adapter->netdev_registered = true; + + netif_tx_stop_all_queues(netdev); + if (CLIENT_ALLOWED(adapter)) { + err = iavf_lan_add_device(adapter); + if (err) { + rtnl_unlock(); + dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", + err); + } + } + dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); + if (netdev->features & NETIF_F_GRO) + dev_info(&pdev->dev, "GRO is enabled\n"); + + adapter->state = __IAVF_DOWN; + set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); + rtnl_unlock(); + + iavf_misc_irq_enable(adapter); + wake_up(&adapter->down_waitqueue); + + adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); + adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); + if (!adapter->rss_key || !adapter->rss_lut) + goto err_mem; + if (RSS_AQ(adapter)) + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; + else + iavf_init_rss(adapter); + + return err; +err_mem: + iavf_free_rss(adapter); +err_register: + iavf_free_misc_irq(adapter); +err_sw_init: + iavf_reset_interrupt_capability(adapter); +err_alloc: + kfree(adapter->vf_res); + adapter->vf_res = NULL; +err: + return err; +} + +/** + * iavf_watchdog_task - Periodic call-back task + * @work: pointer to work_struct + **/ +static void iavf_watchdog_task(struct work_struct *work) +{ + struct iavf_adapter *adapter = container_of(work, + struct iavf_adapter, + watchdog_task.work); + struct iavf_hw *hw = &adapter->hw; + u32 reg_val; + + if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) + goto restart_watchdog; + + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) + adapter->state = __IAVF_COMM_FAILED; + + switch (adapter->state) { + case __IAVF_COMM_FAILED: + reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & + IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 
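The reworked watchdog below becomes a switch on adapter->state; in the __IAVF_COMM_FAILED arm it polls VFGEN_RSTAT for the VFR state the PF writes, treating VFACTIVE or COMPLETED as permission to reinitialize. A hedged sketch of that masked-poll check; the mask and state values here are placeholders, not the hardware's:

#include <stdint.h>
#include <stdbool.h>

#define DEMO_RSTAT_STATE_MASK	0x3u
#define DEMO_VFR_VFACTIVE	1u
#define DEMO_VFR_COMPLETED	2u

/* Read the reset-status register and report whether the PF has marked
 * this VF usable again. */
static bool demo_vf_back_from_reset(const volatile uint32_t *rstat_reg)
{
	uint32_t state = *rstat_reg & DEMO_RSTAT_STATE_MASK;

	return state == DEMO_VFR_VFACTIVE || state == DEMO_VFR_COMPLETED;
}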
+ if (reg_val == VIRTCHNL_VFR_VFACTIVE || + reg_val == VIRTCHNL_VFR_COMPLETED) { + /* A chance for redemption! */ + dev_err(&adapter->pdev->dev, + "Hardware came out of reset. Attempting reinit.\n"); + adapter->state = __IAVF_STARTUP; + adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; + queue_delayed_work(iavf_wq, &adapter->init_task, 10); + clear_bit(__IAVF_IN_CRITICAL_TASK, + &adapter->crit_section); + /* Don't reschedule the watchdog, since we've restarted + * the init task. When init_task contacts the PF and + * gets everything set up again, it'll restart the + * watchdog for us. Down, boy. Sit. Stay. Woof. + */ + return; + } + adapter->aq_required = 0; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + clear_bit(__IAVF_IN_CRITICAL_TASK, + &adapter->crit_section); + queue_delayed_work(iavf_wq, + &adapter->watchdog_task, + msecs_to_jiffies(10)); goto watchdog_done; + case __IAVF_RESETTING: + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); + return; + case __IAVF_DOWN: + case __IAVF_DOWN_PENDING: + case __IAVF_TESTING: + case __IAVF_RUNNING: + if (adapter->current_op) { + if (!iavf_asq_done(hw)) { + dev_dbg(&adapter->pdev->dev, + "Admin queue timeout\n"); + iavf_send_api_ver(adapter); + } + } else { + if (!iavf_process_aq_command(adapter) && + adapter->state == __IAVF_RUNNING) + iavf_request_stats(adapter); + } + break; + case __IAVF_REMOVE: + clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + return; + default: + goto restart_watchdog; } - schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); + /* check for hw reset */ + reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; + if (!reg_val) { + adapter->state = __IAVF_RESETTING; + adapter->flags |= IAVF_FLAG_RESET_PENDING; + adapter->aq_required = 0; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); + queue_work(iavf_wq, &adapter->reset_task); + goto watchdog_done; + } - if (adapter->state == __IAVF_RUNNING) - iavf_request_stats(adapter); + schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); watchdog_done: - if (adapter->state == __IAVF_RUNNING) + if (adapter->state == __IAVF_RUNNING || + adapter->state == __IAVF_COMM_FAILED) iavf_detect_recover_hung(&adapter->vsi); clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); restart_watchdog: - if (adapter->state == __IAVF_REMOVE) - return; if (adapter->aq_required) - mod_timer(&adapter->watchdog_timer, - jiffies + msecs_to_jiffies(20)); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(20)); else - mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2)); - schedule_work(&adapter->adminq_task); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); + queue_work(iavf_wq, &adapter->adminq_task); } static void iavf_disable_vf(struct iavf_adapter *adapter) @@ -1967,7 +2234,7 @@ continue_reset: adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; iavf_misc_irq_enable(adapter); - mod_timer(&adapter->watchdog_timer, jiffies + 2); + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); /* We were running when the reset started, so we need to restore some * state here. 
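Throughout this patch, mod_timer()/schedule_work() pairs are collapsed into a single delayed_work queued on the driver-private iavf_wq: queue_delayed_work() arms it, mod_delayed_work() re-arms or expedites it, and cancel_delayed_work_sync() replaces del_timer_sync() on teardown. A minimal self-rearming sketch of the converted shape, with placeholder names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static struct workqueue_struct *demo_wq;
static struct delayed_work demo_watchdog;

static void demo_watchdog_task(struct work_struct *work)
{
	/* ...periodic checks go here... then self-rearm in 2 seconds: */
	queue_delayed_work(demo_wq, &demo_watchdog, 2 * HZ);
}

static int demo_start(void)
{
	demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&demo_watchdog, demo_watchdog_task);
	queue_delayed_work(demo_wq, &demo_watchdog, 0);	/* first run now */
	return 0;
}

static void demo_stop(void)
{
	cancel_delayed_work_sync(&demo_watchdog);	/* was del_timer_sync() */
	destroy_workqueue(demo_wq);
}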
@@ -2020,9 +2287,9 @@ static void iavf_adminq_task(struct work_struct *work) struct iavf_adapter *adapter = container_of(work, struct iavf_adapter, adminq_task); struct iavf_hw *hw = &adapter->hw; - struct i40e_arq_event_info event; + struct iavf_arq_event_info event; enum virtchnl_ops v_op; - iavf_status ret, v_ret; + enum iavf_status ret, v_ret; u32 val, oldval; u16 pending; @@ -2037,7 +2304,7 @@ static void iavf_adminq_task(struct work_struct *work) do { ret = iavf_clean_arq_element(hw, &event, &pending); v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); - v_ret = (iavf_status)le32_to_cpu(event.desc.cookie_low); + v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); if (ret || !v_op) break; /* No event to process or error cleaning ARQ */ @@ -2239,22 +2506,22 @@ static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, int speed = 0, ret = 0; switch (adapter->link_speed) { - case I40E_LINK_SPEED_40GB: + case IAVF_LINK_SPEED_40GB: speed = 40000; break; - case I40E_LINK_SPEED_25GB: + case IAVF_LINK_SPEED_25GB: speed = 25000; break; - case I40E_LINK_SPEED_20GB: + case IAVF_LINK_SPEED_20GB: speed = 20000; break; - case I40E_LINK_SPEED_10GB: + case IAVF_LINK_SPEED_10GB: speed = 10000; break; - case I40E_LINK_SPEED_1GB: + case IAVF_LINK_SPEED_1GB: speed = 1000; break; - case I40E_LINK_SPEED_100MB: + case IAVF_LINK_SPEED_100MB: speed = 100; break; default: @@ -2432,14 +2699,14 @@ exit: /** * iavf_parse_cls_flower - Parse tc flower filters provided by kernel * @adapter: board private structure - * @cls_flower: pointer to struct tc_cls_flower_offload + * @cls_flower: pointer to struct flow_cls_offload * @filter: pointer to cloud filter structure */ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, - struct tc_cls_flower_offload *f, + struct flow_cls_offload *f, struct iavf_cloud_filter *filter) { - struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_dissector *dissector = rule->match.dissector; u16 n_proto_mask = 0; u16 n_proto_key = 0; @@ -2508,7 +2775,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", match.mask->dst); - return I40E_ERR_CONFIG; + return IAVF_ERR_CONFIG; } } @@ -2518,7 +2785,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", match.mask->src); - return I40E_ERR_CONFIG; + return IAVF_ERR_CONFIG; } } @@ -2553,7 +2820,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", match.mask->vlan_id); - return I40E_ERR_CONFIG; + return IAVF_ERR_CONFIG; } } vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); @@ -2577,7 +2844,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", be32_to_cpu(match.mask->dst)); - return I40E_ERR_CONFIG; + return IAVF_ERR_CONFIG; } } @@ -2587,13 +2854,13 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", be32_to_cpu(match.mask->dst)); - return I40E_ERR_CONFIG; + return IAVF_ERR_CONFIG; } } if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); - return I40E_ERR_CONFIG; + return IAVF_ERR_CONFIG; } if (match.key->dst) { vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); @@ -2614,7 
+2881,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, if (ipv6_addr_any(&match.mask->dst)) { dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", IPV6_ADDR_ANY); - return I40E_ERR_CONFIG; + return IAVF_ERR_CONFIG; } /* src and dest IPv6 address should not be LOOPBACK @@ -2624,7 +2891,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, ipv6_addr_loopback(&match.key->src)) { dev_err(&adapter->pdev->dev, "ipv6 addr should not be loopback\n"); - return I40E_ERR_CONFIG; + return IAVF_ERR_CONFIG; } if (!ipv6_addr_any(&match.mask->dst) || !ipv6_addr_any(&match.mask->src)) @@ -2649,7 +2916,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", be16_to_cpu(match.mask->src)); - return I40E_ERR_CONFIG; + return IAVF_ERR_CONFIG; } } @@ -2659,7 +2926,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", be16_to_cpu(match.mask->dst)); - return I40E_ERR_CONFIG; + return IAVF_ERR_CONFIG; } } if (match.key->dst) { @@ -2704,10 +2971,10 @@ static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, /** * iavf_configure_clsflower - Add tc flower filters * @adapter: board private structure - * @cls_flower: Pointer to struct tc_cls_flower_offload + * @cls_flower: Pointer to struct flow_cls_offload */ static int iavf_configure_clsflower(struct iavf_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) + struct flow_cls_offload *cls_flower) { int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); struct iavf_cloud_filter *filter = NULL; @@ -2783,10 +3050,10 @@ static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, /** * iavf_delete_clsflower - Remove tc flower filters * @adapter: board private structure - * @cls_flower: Pointer to struct tc_cls_flower_offload + * @cls_flower: Pointer to struct flow_cls_offload */ static int iavf_delete_clsflower(struct iavf_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) + struct flow_cls_offload *cls_flower) { struct iavf_cloud_filter *filter = NULL; int err = 0; @@ -2810,17 +3077,17 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter, * @type_data: offload data */ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) + struct flow_cls_offload *cls_flower) { if (cls_flower->common.chain_index) return -EOPNOTSUPP; switch (cls_flower->command) { - case TC_CLSFLOWER_REPLACE: + case FLOW_CLS_REPLACE: return iavf_configure_clsflower(adapter, cls_flower); - case TC_CLSFLOWER_DESTROY: + case FLOW_CLS_DESTROY: return iavf_delete_clsflower(adapter, cls_flower); - case TC_CLSFLOWER_STATS: + case FLOW_CLS_STATS: return -EOPNOTSUPP; default: return -EOPNOTSUPP; @@ -2846,34 +3113,7 @@ static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } } -/** - * iavf_setup_tc_block - register callbacks for tc - * @netdev: network interface device structure - * @f: tc offload data - * - * This function registers block callbacks for tc - * offloads - **/ -static int iavf_setup_tc_block(struct net_device *dev, - struct tc_block_offload *f) -{ - struct iavf_adapter *adapter = netdev_priv(dev); - - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, iavf_setup_tc_block_cb, - adapter, adapter, f->extack); - case TC_BLOCK_UNBIND: - 
tcf_block_cb_unregister(f->block, iavf_setup_tc_block_cb, - adapter); - return 0; - default: - return -EOPNOTSUPP; - } -} +static LIST_HEAD(iavf_block_cb_list); /** * iavf_setup_tc - configure multiple traffic classes @@ -2889,11 +3129,16 @@ static int iavf_setup_tc_block(struct net_device *dev, static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) { + struct iavf_adapter *adapter = netdev_priv(netdev); + switch (type) { case TC_SETUP_QDISC_MQPRIO: return __iavf_setup_tc(netdev, type_data); case TC_SETUP_BLOCK: - return iavf_setup_tc_block(netdev, type_data); + return flow_block_cb_setup_simple(type_data, + &iavf_block_cb_list, + iavf_setup_tc_block_cb, + adapter, adapter, true); default: return -EOPNOTSUPP; } @@ -2908,7 +3153,7 @@ static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt - * handler is registered with the OS, the watchdog timer is started, + * handler is registered with the OS, the watchdog is started, * and the stack is notified that the interface is ready. **/ static int iavf_open(struct net_device *netdev) @@ -3020,7 +3265,7 @@ static int iavf_close(struct net_device *netdev) status = wait_event_timeout(adapter->down_waitqueue, adapter->state == __IAVF_DOWN, - msecs_to_jiffies(200)); + msecs_to_jiffies(500)); if (!status) netdev_warn(netdev, "Device resources not yet released\n"); return 0; @@ -3043,7 +3288,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; } adapter->flags |= IAVF_FLAG_RESET_NEEDED; - schedule_work(&adapter->reset_task); + queue_work(iavf_wq, &adapter->reset_task); return 0; } @@ -3348,217 +3593,41 @@ int iavf_process_config(struct iavf_adapter *adapter) static void iavf_init_task(struct work_struct *work) { struct iavf_adapter *adapter = container_of(work, - struct iavf_adapter, - init_task.work); - struct net_device *netdev = adapter->netdev; + struct iavf_adapter, + init_task.work); struct iavf_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; - int err, bufsz; switch (adapter->state) { case __IAVF_STARTUP: - /* driver loaded, probe complete */ - adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; - adapter->flags &= ~IAVF_FLAG_RESET_PENDING; - err = iavf_set_mac_type(hw); - if (err) { - dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", - err); - goto err; - } - err = iavf_check_reset_complete(hw); - if (err) { - dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", - err); - goto err; - } - hw->aq.num_arq_entries = IAVF_AQ_LEN; - hw->aq.num_asq_entries = IAVF_AQ_LEN; - hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; - hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; - - err = iavf_init_adminq(hw); - if (err) { - dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", - err); - goto err; - } - err = iavf_send_api_ver(adapter); - if (err) { - dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); - iavf_shutdown_adminq(hw); - goto err; - } - adapter->state = __IAVF_INIT_VERSION_CHECK; - goto restart; + if (iavf_startup(adapter) < 0) + goto init_failed; + break; case __IAVF_INIT_VERSION_CHECK: - if (!iavf_asq_done(hw)) { - dev_err(&pdev->dev, "Admin queue command never completed\n"); - iavf_shutdown_adminq(hw); - adapter->state = __IAVF_STARTUP; - goto err; - } - - /* aq msg sent, awaiting reply */ - err = 
iavf_verify_api_ver(adapter); - if (err) { - if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) - err = iavf_send_api_ver(adapter); - else - dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", - adapter->pf_version.major, - adapter->pf_version.minor, - VIRTCHNL_VERSION_MAJOR, - VIRTCHNL_VERSION_MINOR); - goto err; - } - err = iavf_send_vf_config_msg(adapter); - if (err) { - dev_err(&pdev->dev, "Unable to send config request (%d)\n", - err); - goto err; - } - adapter->state = __IAVF_INIT_GET_RESOURCES; - goto restart; - case __IAVF_INIT_GET_RESOURCES: - /* aq msg sent, awaiting reply */ - if (!adapter->vf_res) { - bufsz = sizeof(struct virtchnl_vf_resource) + - (IAVF_MAX_VF_VSI * - sizeof(struct virtchnl_vsi_resource)); - adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); - if (!adapter->vf_res) - goto err; - } - err = iavf_get_vf_config(adapter); - if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { - err = iavf_send_vf_config_msg(adapter); - goto err; - } else if (err == I40E_ERR_PARAM) { - /* We only get ERR_PARAM if the device is in a very bad - * state or if we've been disabled for previous bad - * behavior. Either way, we're done now. - */ - iavf_shutdown_adminq(hw); - dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); - return; - } - if (err) { - dev_err(&pdev->dev, "Unable to get VF config (%d)\n", - err); - goto err_alloc; - } - adapter->state = __IAVF_INIT_SW; + if (iavf_init_version_check(adapter) < 0) + goto init_failed; break; + case __IAVF_INIT_GET_RESOURCES: + if (iavf_init_get_resources(adapter) < 0) + goto init_failed; + return; default: - goto err_alloc; - } - - if (iavf_process_config(adapter)) - goto err_alloc; - adapter->current_op = VIRTCHNL_OP_UNKNOWN; - - adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; - - netdev->netdev_ops = &iavf_netdev_ops; - iavf_set_ethtool_ops(netdev); - netdev->watchdog_timeo = 5 * HZ; - - /* MTU range: 68 - 9710 */ - netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; - - if (!is_valid_ether_addr(adapter->hw.mac.addr)) { - dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", - adapter->hw.mac.addr); - eth_hw_addr_random(netdev); - ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); - } else { - adapter->flags |= IAVF_FLAG_ADDR_SET_BY_PF; - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); - ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); - } - - timer_setup(&adapter->watchdog_timer, iavf_watchdog_timer, 0); - mod_timer(&adapter->watchdog_timer, jiffies + 1); - - adapter->tx_desc_count = IAVF_DEFAULT_TXD; - adapter->rx_desc_count = IAVF_DEFAULT_RXD; - err = iavf_init_interrupt_scheme(adapter); - if (err) - goto err_sw_init; - iavf_map_rings_to_vectors(adapter); - if (adapter->vf_res->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) - adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; - - err = iavf_request_misc_irq(adapter); - if (err) - goto err_sw_init; - - netif_carrier_off(netdev); - adapter->link_up = false; - - if (!adapter->netdev_registered) { - err = register_netdev(netdev); - if (err) - goto err_register; - } - - adapter->netdev_registered = true; - - netif_tx_stop_all_queues(netdev); - if (CLIENT_ALLOWED(adapter)) { - err = iavf_lan_add_device(adapter); - if (err) - dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", - err); + goto init_failed; } - dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); - if (netdev->features & NETIF_F_GRO) - dev_info(&pdev->dev, "GRO is enabled\n"); - - 
adapter->state = __IAVF_DOWN; - set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); - iavf_misc_irq_enable(adapter); - wake_up(&adapter->down_waitqueue); - - adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); - adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); - if (!adapter->rss_key || !adapter->rss_lut) - goto err_mem; - - if (RSS_AQ(adapter)) { - adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; - mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); - } else { - iavf_init_rss(adapter); - } - return; -restart: - schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30)); + queue_delayed_work(iavf_wq, &adapter->init_task, + msecs_to_jiffies(30)); return; -err_mem: - iavf_free_rss(adapter); -err_register: - iavf_free_misc_irq(adapter); -err_sw_init: - iavf_reset_interrupt_capability(adapter); -err_alloc: - kfree(adapter->vf_res); - adapter->vf_res = NULL; -err: - /* Things went into the weeds, so try again later */ +init_failed: if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { - dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n"); + dev_err(&adapter->pdev->dev, + "Failed to communicate with PF; waiting before retry\n"); adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; iavf_shutdown_adminq(hw); adapter->state = __IAVF_STARTUP; - schedule_delayed_work(&adapter->init_task, HZ * 5); + queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5); return; } - schedule_delayed_work(&adapter->init_task, HZ); + queue_delayed_work(iavf_wq, &adapter->init_task, HZ); } /** @@ -3683,11 +3752,11 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->reset_task, iavf_reset_task); INIT_WORK(&adapter->adminq_task, iavf_adminq_task); - INIT_WORK(&adapter->watchdog_task, iavf_watchdog_task); + INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task); - schedule_delayed_work(&adapter->init_task, - msecs_to_jiffies(5 * (pdev->devfn & 0x07))); + queue_delayed_work(iavf_wq, &adapter->init_task, + msecs_to_jiffies(5 * (pdev->devfn & 0x07))); /* Setup the wait queue for indicating transition to down status */ init_waitqueue_head(&adapter->down_waitqueue); @@ -3783,7 +3852,7 @@ static int iavf_resume(struct pci_dev *pdev) return err; } - schedule_work(&adapter->reset_task); + queue_work(iavf_wq, &adapter->reset_task); netif_device_attach(netdev); @@ -3843,8 +3912,7 @@ static void iavf_remove(struct pci_dev *pdev) iavf_reset_interrupt_capability(adapter); iavf_free_q_vectors(adapter); - if (adapter->watchdog_timer.function) - del_timer_sync(&adapter->watchdog_timer); + cancel_delayed_work_sync(&adapter->watchdog_task); cancel_work_sync(&adapter->adminq_task); diff --git a/drivers/net/ethernet/intel/iavf/iavf_osdep.h b/drivers/net/ethernet/intel/iavf/iavf_osdep.h index e6e0b0328706..a452ce90679a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_osdep.h +++ b/drivers/net/ethernet/intel/iavf/iavf_osdep.h @@ -44,9 +44,12 @@ struct iavf_virt_mem { #define iavf_allocate_virt_mem(h, m, s) iavf_allocate_virt_mem_d(h, m, s) #define iavf_free_virt_mem(h, m) iavf_free_virt_mem_d(h, m) -#define iavf_debug(h, m, s, ...) iavf_debug_d(h, m, s, ##__VA_ARGS__) -extern void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...) - __attribute__ ((format(gnu_printf, 3, 4))); +#define iavf_debug(h, m, s, ...) 
\ +do { \ + if (((m) & (h)->debug_mask)) \ + pr_info("iavf %02x:%02x.%x " s, \ + (h)->bus.bus_id, (h)->bus.device, \ + (h)->bus.func, ##__VA_ARGS__); \ +} while (0) -typedef enum iavf_status_code iavf_status; #endif /* _IAVF_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h index d6685103af39..edebfbbcffdc 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h +++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h @@ -16,39 +16,40 @@ */ /* adminq functions */ -iavf_status iavf_init_adminq(struct iavf_hw *hw); -iavf_status iavf_shutdown_adminq(struct iavf_hw *hw); -void i40e_adminq_init_ring_data(struct iavf_hw *hw); -iavf_status iavf_clean_arq_element(struct iavf_hw *hw, - struct i40e_arq_event_info *e, - u16 *events_pending); -iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc, - void *buff, /* can be NULL */ - u16 buff_size, - struct i40e_asq_cmd_details *cmd_details); +enum iavf_status iavf_init_adminq(struct iavf_hw *hw); +enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw); +void iavf_adminq_init_ring_data(struct iavf_hw *hw); +enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw, + struct iavf_arq_event_info *e, + u16 *events_pending); +enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, + struct iavf_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct iavf_asq_cmd_details *cmd_details); bool iavf_asq_done(struct iavf_hw *hw); /* debug function for adminq */ void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc, void *buffer, u16 buf_len); -void i40e_idle_aq(struct iavf_hw *hw); +void iavf_idle_aq(struct iavf_hw *hw); void iavf_resume_aq(struct iavf_hw *hw); bool iavf_check_asq_alive(struct iavf_hw *hw); -iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading); -const char *iavf_aq_str(struct iavf_hw *hw, enum i40e_admin_queue_err aq_err); -const char *iavf_stat_str(struct iavf_hw *hw, iavf_status stat_err); +enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading); +const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err); +const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err); -iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid, - bool pf_lut, u8 *lut, u16 lut_size); -iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid, - bool pf_lut, u8 *lut, u16 lut_size); -iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 seid, - struct i40e_aqc_get_set_rss_key_data *key); -iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid, - struct i40e_aqc_get_set_rss_key_data *key); +enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 seid, + struct iavf_aqc_get_set_rss_key_data *key); +enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid, + struct iavf_aqc_get_set_rss_key_data *key); -iavf_status iavf_set_mac_type(struct iavf_hw *hw); +enum iavf_status iavf_set_mac_type(struct iavf_hw *hw); extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[]; @@ -59,9 +60,10 @@ static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) void iavf_vf_parse_hw_config(struct iavf_hw *hw, struct virtchnl_vf_resource *msg); -iavf_status iavf_vf_reset(struct iavf_hw *hw); -iavf_status 
iavf_aq_send_msg_to_pf(struct iavf_hw *hw, - enum virtchnl_ops v_opcode, - iavf_status v_retval, u8 *msg, u16 msglen, - struct i40e_asq_cmd_details *cmd_details); +enum iavf_status iavf_vf_reset(struct iavf_hw *hw); +enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw, + enum virtchnl_ops v_opcode, + enum iavf_status v_retval, + u8 *msg, u16 msglen, + struct iavf_asq_cmd_details *cmd_details); #endif /* _IAVF_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h index 46742fab7b8c..46e3d1f6b604 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_status.h +++ b/drivers/net/ethernet/intel/iavf/iavf_status.h @@ -5,74 +5,74 @@ #define _IAVF_STATUS_H_ /* Error Codes */ -enum iavf_status_code { - I40E_SUCCESS = 0, - I40E_ERR_NVM = -1, - I40E_ERR_NVM_CHECKSUM = -2, - I40E_ERR_PHY = -3, - I40E_ERR_CONFIG = -4, - I40E_ERR_PARAM = -5, - I40E_ERR_MAC_TYPE = -6, - I40E_ERR_UNKNOWN_PHY = -7, - I40E_ERR_LINK_SETUP = -8, - I40E_ERR_ADAPTER_STOPPED = -9, - I40E_ERR_INVALID_MAC_ADDR = -10, - I40E_ERR_DEVICE_NOT_SUPPORTED = -11, - I40E_ERR_MASTER_REQUESTS_PENDING = -12, - I40E_ERR_INVALID_LINK_SETTINGS = -13, - I40E_ERR_AUTONEG_NOT_COMPLETE = -14, - I40E_ERR_RESET_FAILED = -15, - I40E_ERR_SWFW_SYNC = -16, - I40E_ERR_NO_AVAILABLE_VSI = -17, - I40E_ERR_NO_MEMORY = -18, - I40E_ERR_BAD_PTR = -19, - I40E_ERR_RING_FULL = -20, - I40E_ERR_INVALID_PD_ID = -21, - I40E_ERR_INVALID_QP_ID = -22, - I40E_ERR_INVALID_CQ_ID = -23, - I40E_ERR_INVALID_CEQ_ID = -24, - I40E_ERR_INVALID_AEQ_ID = -25, - I40E_ERR_INVALID_SIZE = -26, - I40E_ERR_INVALID_ARP_INDEX = -27, - I40E_ERR_INVALID_FPM_FUNC_ID = -28, - I40E_ERR_QP_INVALID_MSG_SIZE = -29, - I40E_ERR_QP_TOOMANY_WRS_POSTED = -30, - I40E_ERR_INVALID_FRAG_COUNT = -31, - I40E_ERR_QUEUE_EMPTY = -32, - I40E_ERR_INVALID_ALIGNMENT = -33, - I40E_ERR_FLUSHED_QUEUE = -34, - I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35, - I40E_ERR_INVALID_IMM_DATA_SIZE = -36, - I40E_ERR_TIMEOUT = -37, - I40E_ERR_OPCODE_MISMATCH = -38, - I40E_ERR_CQP_COMPL_ERROR = -39, - I40E_ERR_INVALID_VF_ID = -40, - I40E_ERR_INVALID_HMCFN_ID = -41, - I40E_ERR_BACKING_PAGE_ERROR = -42, - I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43, - I40E_ERR_INVALID_PBLE_INDEX = -44, - I40E_ERR_INVALID_SD_INDEX = -45, - I40E_ERR_INVALID_PAGE_DESC_INDEX = -46, - I40E_ERR_INVALID_SD_TYPE = -47, - I40E_ERR_MEMCPY_FAILED = -48, - I40E_ERR_INVALID_HMC_OBJ_INDEX = -49, - I40E_ERR_INVALID_HMC_OBJ_COUNT = -50, - I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51, - I40E_ERR_SRQ_ENABLED = -52, - I40E_ERR_ADMIN_QUEUE_ERROR = -53, - I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54, - I40E_ERR_BUF_TOO_SHORT = -55, - I40E_ERR_ADMIN_QUEUE_FULL = -56, - I40E_ERR_ADMIN_QUEUE_NO_WORK = -57, - I40E_ERR_BAD_IWARP_CQE = -58, - I40E_ERR_NVM_BLANK_MODE = -59, - I40E_ERR_NOT_IMPLEMENTED = -60, - I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61, - I40E_ERR_DIAG_TEST_FAILED = -62, - I40E_ERR_NOT_READY = -63, - I40E_NOT_SUPPORTED = -64, - I40E_ERR_FIRMWARE_API_VERSION = -65, - I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, +enum iavf_status { + IAVF_SUCCESS = 0, + IAVF_ERR_NVM = -1, + IAVF_ERR_NVM_CHECKSUM = -2, + IAVF_ERR_PHY = -3, + IAVF_ERR_CONFIG = -4, + IAVF_ERR_PARAM = -5, + IAVF_ERR_MAC_TYPE = -6, + IAVF_ERR_UNKNOWN_PHY = -7, + IAVF_ERR_LINK_SETUP = -8, + IAVF_ERR_ADAPTER_STOPPED = -9, + IAVF_ERR_INVALID_MAC_ADDR = -10, + IAVF_ERR_DEVICE_NOT_SUPPORTED = -11, + IAVF_ERR_MASTER_REQUESTS_PENDING = -12, + IAVF_ERR_INVALID_LINK_SETTINGS = -13, + IAVF_ERR_AUTONEG_NOT_COMPLETE = -14, + IAVF_ERR_RESET_FAILED = -15, + IAVF_ERR_SWFW_SYNC = -16, + 
IAVF_ERR_NO_AVAILABLE_VSI = -17, + IAVF_ERR_NO_MEMORY = -18, + IAVF_ERR_BAD_PTR = -19, + IAVF_ERR_RING_FULL = -20, + IAVF_ERR_INVALID_PD_ID = -21, + IAVF_ERR_INVALID_QP_ID = -22, + IAVF_ERR_INVALID_CQ_ID = -23, + IAVF_ERR_INVALID_CEQ_ID = -24, + IAVF_ERR_INVALID_AEQ_ID = -25, + IAVF_ERR_INVALID_SIZE = -26, + IAVF_ERR_INVALID_ARP_INDEX = -27, + IAVF_ERR_INVALID_FPM_FUNC_ID = -28, + IAVF_ERR_QP_INVALID_MSG_SIZE = -29, + IAVF_ERR_QP_TOOMANY_WRS_POSTED = -30, + IAVF_ERR_INVALID_FRAG_COUNT = -31, + IAVF_ERR_QUEUE_EMPTY = -32, + IAVF_ERR_INVALID_ALIGNMENT = -33, + IAVF_ERR_FLUSHED_QUEUE = -34, + IAVF_ERR_INVALID_PUSH_PAGE_INDEX = -35, + IAVF_ERR_INVALID_IMM_DATA_SIZE = -36, + IAVF_ERR_TIMEOUT = -37, + IAVF_ERR_OPCODE_MISMATCH = -38, + IAVF_ERR_CQP_COMPL_ERROR = -39, + IAVF_ERR_INVALID_VF_ID = -40, + IAVF_ERR_INVALID_HMCFN_ID = -41, + IAVF_ERR_BACKING_PAGE_ERROR = -42, + IAVF_ERR_NO_PBLCHUNKS_AVAILABLE = -43, + IAVF_ERR_INVALID_PBLE_INDEX = -44, + IAVF_ERR_INVALID_SD_INDEX = -45, + IAVF_ERR_INVALID_PAGE_DESC_INDEX = -46, + IAVF_ERR_INVALID_SD_TYPE = -47, + IAVF_ERR_MEMCPY_FAILED = -48, + IAVF_ERR_INVALID_HMC_OBJ_INDEX = -49, + IAVF_ERR_INVALID_HMC_OBJ_COUNT = -50, + IAVF_ERR_INVALID_SRQ_ARM_LIMIT = -51, + IAVF_ERR_SRQ_ENABLED = -52, + IAVF_ERR_ADMIN_QUEUE_ERROR = -53, + IAVF_ERR_ADMIN_QUEUE_TIMEOUT = -54, + IAVF_ERR_BUF_TOO_SHORT = -55, + IAVF_ERR_ADMIN_QUEUE_FULL = -56, + IAVF_ERR_ADMIN_QUEUE_NO_WORK = -57, + IAVF_ERR_BAD_IWARP_CQE = -58, + IAVF_ERR_NVM_BLANK_MODE = -59, + IAVF_ERR_NOT_IMPLEMENTED = -60, + IAVF_ERR_PE_DOORBELL_NOT_ENABLED = -61, + IAVF_ERR_DIAG_TEST_FAILED = -62, + IAVF_ERR_NOT_READY = -63, + IAVF_NOT_SUPPORTED = -64, + IAVF_ERR_FIRMWARE_API_VERSION = -65, + IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, }; #endif /* _IAVF_STATUS_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h index 1474f5539751..1058e68a02b4 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_trace.h +++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h @@ -17,8 +17,8 @@ /* See trace-events-sample.h for a detailed description of why this * guard clause is different from most normal include files. 
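The block above gives the VF driver its own status namespace: every numeric value is unchanged, only the I40E_ prefix becomes IAVF_. Where a caller has to surface standard errno values rather than these driver-private codes, a translation helper is the usual approach — a minimal sketch assuming only the IAVF_* values defined above (this helper is illustrative, not part of the patch):

static int iavf_status_to_errno(enum iavf_status status)
{
	switch (status) {
	case IAVF_SUCCESS:
		return 0;
	case IAVF_ERR_PARAM:
	case IAVF_ERR_INVALID_SIZE:
		return -EINVAL;
	case IAVF_ERR_NO_MEMORY:
		return -ENOMEM;
	case IAVF_ERR_TIMEOUT:
	case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
		return -ETIMEDOUT;
	case IAVF_NOT_SUPPORTED:
	case IAVF_ERR_NOT_IMPLEMENTED:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}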
*/ -#if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) -#define _I40E_TRACE_H_ +#if !defined(_IAVF_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _IAVF_TRACE_H_ #include <linux/tracepoint.h> diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 06d1509d57f7..0cca1b589b56 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -190,7 +190,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi) static bool iavf_clean_tx_irq(struct iavf_vsi *vsi, struct iavf_ring *tx_ring, int napi_budget) { - u16 i = tx_ring->next_to_clean; + int i = tx_ring->next_to_clean; struct iavf_tx_buffer *tx_buf; struct iavf_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0; @@ -379,19 +379,19 @@ static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector) unsigned int divisor; switch (q_vector->adapter->link_speed) { - case I40E_LINK_SPEED_40GB: + case IAVF_LINK_SPEED_40GB: divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024; break; - case I40E_LINK_SPEED_25GB: - case I40E_LINK_SPEED_20GB: + case IAVF_LINK_SPEED_25GB: + case IAVF_LINK_SPEED_20GB: divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512; break; default: - case I40E_LINK_SPEED_10GB: + case IAVF_LINK_SPEED_10GB: divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256; break; - case I40E_LINK_SPEED_1GB: - case I40E_LINK_SPEED_100MB: + case IAVF_LINK_SPEED_1GB: + case IAVF_LINK_SPEED_100MB: divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32; break; } @@ -1236,6 +1236,9 @@ static void iavf_add_rx_frag(struct iavf_ring *rx_ring, unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring)); #endif + if (!size) + return; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, size, truesize); @@ -1260,6 +1263,9 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, { struct iavf_rx_buffer *rx_buffer; + if (!size) + return NULL; + rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; prefetchw(rx_buffer->page); @@ -1290,7 +1296,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, struct iavf_rx_buffer *rx_buffer, unsigned int size) { - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + void *va; #if (PAGE_SIZE < 8192) unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; #else @@ -1299,7 +1305,10 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, unsigned int headlen; struct sk_buff *skb; + if (!rx_buffer) + return NULL; /* prefetch first cache line of first page */ + va = page_address(rx_buffer->page) + rx_buffer->page_offset; prefetch(va); #if L1_CACHE_BYTES < 128 prefetch(va + L1_CACHE_BYTES); @@ -1354,7 +1363,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, struct iavf_rx_buffer *rx_buffer, unsigned int size) { - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + void *va; #if (PAGE_SIZE < 8192) unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; #else @@ -1363,7 +1372,10 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, #endif struct sk_buff *skb; + if (!rx_buffer) + return NULL; /* prefetch first cache line of first page */ + va = page_address(rx_buffer->page) + rx_buffer->page_offset; prefetch(va); #if L1_CACHE_BYTES < 128 prefetch(va + L1_CACHE_BYTES); @@ -1398,6 +1410,9 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, static void iavf_put_rx_buffer(struct iavf_ring *rx_ring, struct iavf_rx_buffer *rx_buffer) { + if (!rx_buffer) + 
return; + if (iavf_can_reuse_rx_page(rx_buffer)) { /* hand second half of page back to the ring */ iavf_reuse_rx_page(rx_ring, rx_buffer); @@ -1496,11 +1511,12 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) * verified the descriptor has been written back. */ dma_rmb(); +#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT) + if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD)) + break; size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >> IAVF_RXD_QW1_LENGTH_PBUF_SHIFT; - if (!size) - break; iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb); rx_buffer = iavf_get_rx_buffer(rx_ring, size); @@ -1516,7 +1532,8 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; - rx_buffer->pagecnt_bias++; + if (rx_buffer) + rx_buffer->pagecnt_bias++; break; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h index ca89583613fb..7190a40c540c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_type.h +++ b/drivers/net/ethernet/intel/iavf/iavf_type.h @@ -7,7 +7,7 @@ #include "iavf_status.h" #include "iavf_osdep.h" #include "iavf_register.h" -#include "i40e_adminq.h" +#include "iavf_adminq.h" #include "iavf_devids.h" #define IAVF_RXQ_CTX_DBUFF_SHIFT 7 @@ -21,7 +21,7 @@ /* forward declaration */ struct iavf_hw; -typedef void (*I40E_ADMINQ_CALLBACK)(struct iavf_hw *, struct i40e_aq_desc *); +typedef void (*IAVF_ADMINQ_CALLBACK)(struct iavf_hw *, struct iavf_aq_desc *); /* Data type manipulation macros. */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index e64751da0921..d49d58a6de80 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -22,7 +22,7 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter, enum virtchnl_ops op, u8 *msg, u16 len) { struct iavf_hw *hw = &adapter->hw; - iavf_status err; + enum iavf_status err; if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) return 0; /* nothing to see here, move along */ @@ -41,7 +41,7 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter, * * Send API version admin queue message to the PF. The reply is not checked * in this function. Returns 0 if the message was successfully - * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. + * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not. **/ int iavf_send_api_ver(struct iavf_adapter *adapter) { @@ -60,16 +60,16 @@ int iavf_send_api_ver(struct iavf_adapter *adapter) * * Compare API versions with the PF. Must be called after admin queue is * initialized. Returns 0 if API versions match, -EIO if they do not, - * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors + * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors * from the firmware are propagated. 
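The Rx-clean hunks above change the loop's termination test: a descriptor may legitimately be written back with a zero packet length, so a zero size no longer means "ring empty". Completion is instead detected from the DD (descriptor done) bit, and the zero-size and NULL-buffer paths are each guarded individually. A condensed sketch of the resulting flow (not verbatim driver code):

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

	/* dma_rmb() keeps reads of the other descriptor fields from
	 * being speculated ahead of the writeback check
	 */
	dma_rmb();
	if (!(qword & BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)))
		break;	/* descriptor not yet written back by hardware */

	size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
	       IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
	/* size == 0 is now handled (iavf_get_rx_buffer() returns NULL
	 * and the skb helpers bail out), not treated as end-of-work
	 */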
**/ int iavf_verify_api_ver(struct iavf_adapter *adapter) { struct virtchnl_version_info *pf_vvi; struct iavf_hw *hw = &adapter->hw; - struct i40e_arq_event_info event; + struct iavf_arq_event_info event; enum virtchnl_ops op; - iavf_status err; + enum iavf_status err; event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); @@ -92,7 +92,7 @@ int iavf_verify_api_ver(struct iavf_adapter *adapter) } - err = (iavf_status)le32_to_cpu(event.desc.cookie_low); + err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); if (err) goto out_alloc; @@ -123,7 +123,7 @@ out: * * Send VF configuration request admin queue message to the PF. The reply * is not checked in this function. Returns 0 if the message was - * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. + * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not. **/ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) { @@ -189,9 +189,9 @@ static void iavf_validate_num_queues(struct iavf_adapter *adapter) int iavf_get_vf_config(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; - struct i40e_arq_event_info event; + struct iavf_arq_event_info event; enum virtchnl_ops op; - iavf_status err; + enum iavf_status err; u16 len; len = sizeof(struct virtchnl_vf_resource) + @@ -216,7 +216,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter) break; } - err = (iavf_status)le32_to_cpu(event.desc.cookie_low); + err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); /* some PFs send more queues than we should have so validate that @@ -242,7 +242,8 @@ void iavf_configure_queues(struct iavf_adapter *adapter) struct virtchnl_vsi_queue_config_info *vqci; struct virtchnl_queue_pair_info *vqpi; int pairs = adapter->num_active_queues; - int i, len, max_frame = IAVF_MAX_RXBUFFER; + int i, max_frame = IAVF_MAX_RXBUFFER; + size_t len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -251,8 +252,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter) return; } adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES; - len = sizeof(struct virtchnl_vsi_queue_config_info) + - (sizeof(struct virtchnl_queue_pair_info) * pairs); + len = struct_size(vqci, qpair, pairs); vqci = kzalloc(len, GFP_KERNEL); if (!vqci) return; @@ -351,8 +351,9 @@ void iavf_map_queues(struct iavf_adapter *adapter) { struct virtchnl_irq_map_info *vimi; struct virtchnl_vector_map *vecmap; - int v_idx, q_vectors, len; struct iavf_q_vector *q_vector; + int v_idx, q_vectors; + size_t len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -364,9 +365,7 @@ void iavf_map_queues(struct iavf_adapter *adapter) q_vectors = adapter->num_msix_vectors - NONQ_VECS; - len = sizeof(struct virtchnl_irq_map_info) + - (adapter->num_msix_vectors * - sizeof(struct virtchnl_vector_map)); + len = struct_size(vimi, vecmap, adapter->num_msix_vectors); vimi = kzalloc(len, GFP_KERNEL); if (!vimi) return; @@ -416,7 +415,7 @@ int iavf_request_queues(struct iavf_adapter *adapter, int num) return -EBUSY; } - vfres.num_queue_pairs = num; + vfres.num_queue_pairs = min_t(int, num, num_online_cpus()); adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES; adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; @@ -433,9 +432,10 @@ int iavf_request_queues(struct iavf_adapter *adapter, int num) void iavf_add_ether_addrs(struct iavf_adapter *adapter) { struct 
virtchnl_ether_addr_list *veal; - int len, i = 0, count = 0; struct iavf_mac_filter *f; + int i = 0, count = 0; bool more = false; + size_t len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -457,15 +457,13 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter) } adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR; - len = sizeof(struct virtchnl_ether_addr_list) + - (count * sizeof(struct virtchnl_ether_addr)); + len = struct_size(veal, list, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n"); count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_ether_addr_list)) / sizeof(struct virtchnl_ether_addr); - len = sizeof(struct virtchnl_ether_addr_list) + - (count * sizeof(struct virtchnl_ether_addr)); + len = struct_size(veal, list, count); more = true; } @@ -505,8 +503,9 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter) { struct virtchnl_ether_addr_list *veal; struct iavf_mac_filter *f, *ftmp; - int len, i = 0, count = 0; + int i = 0, count = 0; bool more = false; + size_t len; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -528,15 +527,13 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter) } adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR; - len = sizeof(struct virtchnl_ether_addr_list) + - (count * sizeof(struct virtchnl_ether_addr)); + len = struct_size(veal, list, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n"); count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(struct virtchnl_ether_addr_list)) / sizeof(struct virtchnl_ether_addr); - len = sizeof(struct virtchnl_ether_addr_list) + - (count * sizeof(struct virtchnl_ether_addr)); + len = struct_size(veal, list, count); more = true; } veal = kzalloc(len, GFP_ATOMIC); @@ -938,22 +935,22 @@ static void iavf_print_link_message(struct iavf_adapter *adapter) } switch (adapter->link_speed) { - case I40E_LINK_SPEED_40GB: + case IAVF_LINK_SPEED_40GB: speed = "40 G"; break; - case I40E_LINK_SPEED_25GB: + case IAVF_LINK_SPEED_25GB: speed = "25 G"; break; - case I40E_LINK_SPEED_20GB: + case IAVF_LINK_SPEED_20GB: speed = "20 G"; break; - case I40E_LINK_SPEED_10GB: + case IAVF_LINK_SPEED_10GB: speed = "10 G"; break; - case I40E_LINK_SPEED_1GB: + case IAVF_LINK_SPEED_1GB: speed = "1000 M"; break; - case I40E_LINK_SPEED_100MB: + case IAVF_LINK_SPEED_100MB: speed = "100 M"; break; default: @@ -973,7 +970,7 @@ static void iavf_print_link_message(struct iavf_adapter *adapter) void iavf_enable_channels(struct iavf_adapter *adapter) { struct virtchnl_tc_info *vti = NULL; - u16 len; + size_t len; int i; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { @@ -983,9 +980,7 @@ void iavf_enable_channels(struct iavf_adapter *adapter) return; } - len = (adapter->num_tc * sizeof(struct virtchnl_channel_info)) + - sizeof(struct virtchnl_tc_info); - + len = struct_size(vti, list, adapter->num_tc - 1); vti = kzalloc(len, GFP_KERNEL); if (!vti) return; @@ -1184,8 +1179,8 @@ void iavf_request_reset(struct iavf_adapter *adapter) * This function handles the reply messages. 
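The length computations replaced in these hunks all have the same shape: a fixed header plus a variable-count trailing array. struct_size() from linux/overflow.h expresses that directly and saturates instead of wrapping on multiplication overflow. The shape of the conversion, using the ether-addr list from the hunk above:

#include <linux/overflow.h>

	struct virtchnl_ether_addr_list *veal;
	size_t len;

	/* header plus 'count' trailing struct virtchnl_ether_addr entries */
	len = struct_size(veal, list, count);
	veal = kzalloc(len, GFP_ATOMIC);	/* GFP_ATOMIC as in the MAC filter paths */

One subtlety visible later in this file: iavf_enable_channels() passes num_tc - 1 because virtchnl_tc_info declares list[1], so one element is already counted in sizeof(*vti).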
**/ void iavf_virtchnl_completion(struct iavf_adapter *adapter, - enum virtchnl_ops v_opcode, iavf_status v_retval, - u8 *msg, u16 msglen) + enum virtchnl_ops v_opcode, + enum iavf_status v_retval, u8 *msg, u16 msglen) { struct net_device *netdev = adapter->netdev; @@ -1238,7 +1233,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { adapter->flags |= IAVF_FLAG_RESET_PENDING; dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); - schedule_work(&adapter->reset_task); + queue_work(iavf_wq, &adapter->reset_task); } break; default: diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 792e6e42030e..9ee6b55553c0 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -44,15 +44,22 @@ extern const char ice_drv_ver[]; #define ICE_BAR0 0 #define ICE_REQ_DESC_MULTIPLE 32 -#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE +#define ICE_MIN_NUM_DESC 64 #define ICE_MAX_NUM_DESC 8160 -/* set default number of Rx/Tx descriptors to the minimum between - * ICE_MAX_NUM_DESC and the number of descriptors to fill up an entire page +#define ICE_DFLT_MIN_RX_DESC 512 +/* if the default number of Rx descriptors between ICE_MAX_NUM_DESC and the + * number of descriptors to fill up an entire page is greater than or equal to + * ICE_DFLT_MIN_RX_DESC set it based on page size, otherwise set it to + * ICE_DFLT_MIN_RX_DESC + */ +#define ICE_DFLT_NUM_RX_DESC \ + min_t(u16, ICE_MAX_NUM_DESC, \ + max_t(u16, ALIGN(PAGE_SIZE / sizeof(union ice_32byte_rx_desc), \ + ICE_REQ_DESC_MULTIPLE), \ + ICE_DFLT_MIN_RX_DESC)) +/* set default number of Tx descriptors to the minimum between ICE_MAX_NUM_DESC + * and the number of descriptors to fill up an entire page */ -#define ICE_DFLT_NUM_RX_DESC min_t(u16, ICE_MAX_NUM_DESC, \ - ALIGN(PAGE_SIZE / \ - sizeof(union ice_32byte_rx_desc), \ - ICE_REQ_DESC_MULTIPLE)) #define ICE_DFLT_NUM_TX_DESC min_t(u16, ICE_MAX_NUM_DESC, \ ALIGN(PAGE_SIZE / \ sizeof(struct ice_tx_desc), \ @@ -160,7 +167,7 @@ struct ice_tc_cfg { struct ice_res_tracker { u16 num_entries; - u16 search_hint; + u16 end; u16 list[1]; }; @@ -182,6 +189,7 @@ struct ice_sw { }; enum ice_state { + __ICE_TESTING, __ICE_DOWN, __ICE_NEEDS_RESTART, __ICE_PREPARED_FOR_RESET, /* set by driver when prepared */ @@ -244,8 +252,7 @@ struct ice_vsi { u32 rx_buf_failed; u32 rx_page_failed; int num_q_vectors; - int sw_base_vector; /* Irq base for OS reserved vectors */ - int hw_base_vector; /* HW (absolute) index of a vector */ + int base_vector; /* IRQ base for OS reserved vectors */ enum ice_vsi_type type; u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ @@ -277,10 +284,10 @@ struct ice_vsi { struct list_head tmp_sync_list; /* MAC filters to be synced */ struct list_head tmp_unsync_list; /* MAC filters to be unsynced */ - u8 irqs_ready; - u8 current_isup; /* Sync 'link up' logging */ - u8 stat_offsets_loaded; - u8 vlan_ena; + u8 irqs_ready:1; + u8 current_isup:1; /* Sync 'link up' logging */ + u8 stat_offsets_loaded:1; + u8 vlan_ena:1; /* queue information */ u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ @@ -330,7 +337,7 @@ enum ice_pf_flags { ICE_FLAG_DCB_CAPABLE, ICE_FLAG_DCB_ENA, ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, - ICE_FLAG_DISABLE_FW_LLDP, + ICE_FLAG_ENABLE_FW_LLDP, ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ ICE_PF_FLAGS_NBITS /* must be last */ }; @@ -340,10 +347,12 @@ struct ice_pf { /* OS reserved IRQ details */ struct 
msix_entry *msix_entries; - struct ice_res_tracker *sw_irq_tracker; - - /* HW reserved Interrupts for this PF */ - struct ice_res_tracker *hw_irq_tracker; + struct ice_res_tracker *irq_tracker; + /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the + * number of MSIX vectors needed for all SR-IOV VFs from the number of + * MSIX vectors allowed on this PF. + */ + u16 sriov_base_vector; struct ice_vsi **vsi; /* VSIs created by the driver */ struct ice_sw *first_sw; /* first switch created by firmware */ @@ -365,10 +374,8 @@ struct ice_pf { struct mutex sw_mutex; /* lock for protecting VSI alloc flow */ u32 msg_enable; u32 hw_csum_rx_error; - u32 sw_oicr_idx; /* Other interrupt cause SW vector index */ + u32 oicr_idx; /* Other interrupt cause MSIX vector index */ u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */ - u32 hw_oicr_idx; /* Other interrupt cause vector HW index */ - u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */ u32 num_lan_msix; /* Total MSIX vectors for base driver */ u16 num_lan_tx; /* num LAN Tx queues setup */ u16 num_lan_rx; /* num LAN Rx queues setup */ @@ -384,7 +391,7 @@ struct ice_pf { struct ice_hw_port_stats stats; struct ice_hw_port_stats stats_prev; struct ice_hw hw; - u8 stat_prev_loaded; /* has previous stats been loaded */ + u8 stat_prev_loaded:1; /* has previous stats been loaded */ #ifdef CONFIG_DCB u16 dcbx_cap; #endif /* CONFIG_DCB */ @@ -392,6 +399,7 @@ struct ice_pf { unsigned long tx_timeout_last_recovery; u32 tx_timeout_recovery_level; char int_name[ICE_INT_NAME_STR_LEN]; + u32 sw_int_count; }; struct ice_netdev_priv { @@ -409,7 +417,7 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, struct ice_q_vector *q_vector) { u32 vector = (vsi && q_vector) ? 
q_vector->reg_idx : - ((struct ice_pf *)hw->back)->hw_oicr_idx; + ((struct ice_pf *)hw->back)->oicr_idx; int itr = ICE_ITR_NONE; u32 val; @@ -444,17 +452,22 @@ ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type) return NULL; } +int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); +int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); void ice_set_ethtool_ops(struct net_device *netdev); int ice_up(struct ice_vsi *vsi); int ice_down(struct ice_vsi *vsi); +int ice_vsi_cfg(struct ice_vsi *vsi); +struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi); int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); void ice_print_link_msg(struct ice_vsi *vsi, bool isup); -void ice_napi_del(struct ice_vsi *vsi); #ifdef CONFIG_DCB int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked); void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked); #endif /* CONFIG_DCB */ +int ice_open(struct net_device *netdev); +int ice_stop(struct net_device *netdev); #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 6ef083002f5b..765e3c2ed045 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -35,8 +35,8 @@ struct ice_aqc_get_ver { /* Queue Shutdown (direct 0x0003) */ struct ice_aqc_q_shutdown { -#define ICE_AQC_DRIVER_UNLOADING BIT(0) __le32 driver_unloading; +#define ICE_AQC_DRIVER_UNLOADING BIT(0) u8 reserved[12]; }; @@ -120,11 +120,9 @@ struct ice_aqc_manage_mac_read { #define ICE_AQC_MAN_MAC_WOL_ADDR_VALID BIT(7) #define ICE_AQC_MAN_MAC_READ_S 4 #define ICE_AQC_MAN_MAC_READ_M (0xF << ICE_AQC_MAN_MAC_READ_S) - u8 lport_num; - u8 lport_num_valid; -#define ICE_AQC_MAN_MAC_PORT_NUM_IS_VALID BIT(0) + u8 rsvd[2]; u8 num_addr; /* Used in response */ - u8 reserved[3]; + u8 rsvd1[3]; __le32 addr_high; __le32 addr_low; }; @@ -140,7 +138,7 @@ struct ice_aqc_manage_mac_read_resp { /* Manage MAC address, write command - direct (0x0108) */ struct ice_aqc_manage_mac_write { - u8 port_num; + u8 rsvd; u8 flags; #define ICE_AQC_MAN_MAC_WR_MC_MAG_EN BIT(0) #define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP BIT(1) @@ -920,6 +918,8 @@ struct ice_aqc_get_phy_caps_data { #define ICE_AQC_PHY_EN_LINK BIT(3) #define ICE_AQC_PHY_AN_MODE BIT(4) #define ICE_AQC_GET_PHY_EN_MOD_QUAL BIT(5) +#define ICE_AQC_PHY_EN_AUTO_FEC BIT(7) +#define ICE_AQC_PHY_CAPS_MASK ICE_M(0xff, 0) u8 low_power_ctrl; #define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0) __le16 eee_cap; @@ -932,6 +932,7 @@ struct ice_aqc_get_phy_caps_data { #define ICE_AQC_PHY_EEE_EN_40GBASE_KR4 BIT(6) __le16 eeer_value; u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */ + u8 phy_fw_ver[8]; u8 link_fec_options; #define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN BIT(0) #define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1) @@ -940,6 +941,8 @@ struct ice_aqc_get_phy_caps_data { #define ICE_AQC_PHY_FEC_25G_RS_544_REQ BIT(4) #define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6) #define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7) +#define ICE_AQC_PHY_FEC_MASK ICE_M(0xdf, 0) + u8 rsvd1; /* Byte 35 reserved */ u8 extended_compliance_code; #define ICE_MODULE_TYPE_TOTAL_BYTE 3 u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; @@ -954,13 +957,14 @@ struct ice_aqc_get_phy_caps_data { #define ICE_AQC_MOD_TYPE_BYTE2_SFP_PLUS 0xA0 #define ICE_AQC_MOD_TYPE_BYTE2_QSFP_PLUS 0x86 u8 qualified_module_count; 
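The reserved-byte shuffling in ice_aqc_manage_mac_read/write above is not cosmetic padding: direct (non-indirect) admin queue commands carry their parameters entirely inside the descriptor's 16-byte params area, so each of these structures has to stay exactly 16 bytes. A hypothetical compile-time guard (not in this patch) for two of the layouts in this hunk:

#include <linux/build_bug.h>

static inline void ice_aqc_layout_checks(void)
{
	BUILD_BUG_ON(sizeof(struct ice_aqc_q_shutdown) != 16);
	BUILD_BUG_ON(sizeof(struct ice_aqc_manage_mac_write) != 16);
}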
+ u8 rsvd2[7]; /* Bytes 47:41 reserved */ #define ICE_AQC_QUAL_MOD_COUNT_MAX 16 struct { u8 v_oui[3]; - u8 rsvd1; + u8 rsvd3; u8 v_part[16]; __le32 v_rev; - __le64 rsvd8; + __le64 rsvd4; } qual_modules[ICE_AQC_QUAL_MOD_COUNT_MAX]; }; @@ -1062,6 +1066,7 @@ struct ice_aqc_get_link_status_data { #define ICE_AQ_LINK_25G_KR_FEC_EN BIT(0) #define ICE_AQ_LINK_25G_RS_528_FEC_EN BIT(1) #define ICE_AQ_LINK_25G_RS_544_FEC_EN BIT(2) +#define ICE_AQ_FEC_MASK ICE_M(0x7, 0) /* Pacing Config */ #define ICE_AQ_CFG_PACING_S 3 #define ICE_AQ_CFG_PACING_M (0xF << ICE_AQ_CFG_PACING_S) @@ -1112,6 +1117,14 @@ struct ice_aqc_set_event_mask { u8 reserved1[6]; }; +/* Set MAC Loopback command (direct 0x0620) */ +struct ice_aqc_set_mac_lb { + u8 lb_mode; +#define ICE_AQ_MAC_LB_EN BIT(0) +#define ICE_AQ_MAC_LB_OSC_CLK BIT(1) + u8 reserved[15]; +}; + /* Set Port Identification LED (direct, 0x06E9) */ struct ice_aqc_set_port_id_led { u8 lport_num; @@ -1145,6 +1158,17 @@ struct ice_aqc_nvm { __le32 addr_low; }; +/* NVM Checksum Command (direct, 0x0706) */ +struct ice_aqc_nvm_checksum { + u8 flags; +#define ICE_AQC_NVM_CHECKSUM_VERIFY BIT(0) +#define ICE_AQC_NVM_CHECKSUM_RECALC BIT(1) + u8 rsvd; + __le16 checksum; /* Used only by response */ +#define ICE_AQC_NVM_CHECKSUM_CORRECT 0xBABA + u8 rsvd2[12]; +}; + /** * Send to PF command (indirect 0x0801) ID is only used by PF * @@ -1249,7 +1273,7 @@ struct ice_aqc_get_cee_dcb_cfg_resp { }; /* Set Local LLDP MIB (indirect 0x0A08) - * Used to replace the local MIB of a given LLDP agent. e.g. DCBx + * Used to replace the local MIB of a given LLDP agent. e.g. DCBX */ struct ice_aqc_lldp_set_local_mib { u8 type; @@ -1266,7 +1290,7 @@ struct ice_aqc_lldp_set_local_mib { }; /* Stop/Start LLDP Agent (direct 0x0A09) - * Used for stopping/starting specific LLDP agent. e.g. DCBx. + * Used for stopping/starting specific LLDP agent. e.g. DCBX. * The same structure is used for the response, with the command field * being used as the status field. 
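The new 0x0706 command supports both verifying and recalculating the NVM checksum; on a verify, firmware returns the computed checksum in the response and 0xBABA signals a correct image. A simplified sketch of a verify call — the real ice_nvm_validate_checksum(), whose prototype appears later in this patch, additionally acquires NVM ownership first:

static enum ice_status ice_nvm_verify_sketch(struct ice_hw *hw)
{
	struct ice_aqc_nvm_checksum *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
	cmd = &desc.params.nvm_checksum;
	cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status &&
	    le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
		status = ICE_ERR_NVM_CHECKSUM; /* assumed from ice_status.h */

	return status;
}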
*/ @@ -1539,6 +1563,7 @@ struct ice_aq_desc { struct ice_aqc_query_txsched_res query_sched_res; struct ice_aqc_query_port_ets port_ets; struct ice_aqc_nvm nvm; + struct ice_aqc_nvm_checksum nvm_checksum; struct ice_aqc_pf_vf_msg virt; struct ice_aqc_lldp_get_mib lldp_get_mib; struct ice_aqc_lldp_set_mib_change lldp_set_event; @@ -1554,6 +1579,7 @@ struct ice_aq_desc { struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; struct ice_aqc_fw_logging fw_logging; struct ice_aqc_get_clear_fw_log get_clear_fw_log; + struct ice_aqc_set_mac_lb set_mac_lb; struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; struct ice_aqc_set_event_mask set_event_mask; struct ice_aqc_get_link_status get_link_status; @@ -1642,10 +1668,12 @@ enum ice_adminq_opc { ice_aqc_opc_restart_an = 0x0605, ice_aqc_opc_get_link_status = 0x0607, ice_aqc_opc_set_event_mask = 0x0613, + ice_aqc_opc_set_mac_lb = 0x0620, ice_aqc_opc_set_port_id_led = 0x06E9, /* NVM commands */ ice_aqc_opc_nvm_read = 0x0701, + ice_aqc_opc_nvm_checksum = 0x0706, /* PF/VF mailbox commands */ ice_mbx_opc_send_msg_to_pf = 0x0801, @@ -1671,6 +1699,7 @@ enum ice_adminq_opc { /* debug commands */ ice_aqc_opc_fw_logging = 0xFF09, + ice_aqc_opc_fw_logging_info = 0xFF10, }; #endif /* _ICE_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index da7878529929..2e0731c1e1a3 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -51,9 +51,6 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw) */ void ice_dev_onetime_setup(struct ice_hw *hw) { - /* configure Rx - set non pxe mode */ - wr32(hw, GLLAN_RCTL_0, 0x1); - #define MBX_PF_VT_PFALLOC 0x00231E80 /* set VFs per PF */ wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF)); @@ -307,6 +304,8 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, hw_link_info->an_info = link_data.an_info; hw_link_info->ext_info = link_data.ext_info; hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size); + hw_link_info->fec_info = link_data.cfg & ICE_AQ_FEC_MASK; + hw_link_info->topo_media_conflict = link_data.topo_media_conflict; hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M; /* update fc info */ @@ -476,6 +475,49 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX) /** + * ice_get_fw_log_cfg - get FW logging configuration + * @hw: pointer to the HW struct + */ +static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw) +{ + struct ice_aqc_fw_logging_data *config; + struct ice_aq_desc desc; + enum ice_status status; + u16 size; + + size = ICE_FW_LOG_DESC_SIZE_MAX; + config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL); + if (!config) + return ICE_ERR_NO_MEMORY; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info); + + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + status = ice_aq_send_cmd(hw, &desc, config, size, NULL); + if (!status) { + u16 i; + + /* Save FW logging information into the HW structure */ + for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { + u16 v, m, flgs; + + v = le16_to_cpu(config->entry[i]); + m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; + flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S; + + if (m < ICE_AQC_FW_LOG_ID_MAX) + hw->fw_log.evnts[m].cur = flgs; + } + } + + devm_kfree(ice_hw_to_dev(hw), config); + + return status; +} + +/** * ice_cfg_fw_log - configure FW logging * @hw: pointer to the HW struct 
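ice_get_fw_log_cfg() above unpacks each 16-bit entry with the driver's usual _S (shift) / _M (mask) convention. The generic form, shown as an illustrative helper rather than anything this patch adds:

static inline u16 ice_field_get(u16 val, u16 mask, u16 shift)
{
	return (val & mask) >> shift;
}

/* e.g.: m = ice_field_get(v, ICE_AQC_FW_LOG_ID_M, ICE_AQC_FW_LOG_ID_S); */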
* @enable: enable certain FW logging events if true, disable all if false @@ -529,6 +571,11 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq))) return 0; + /* Get current FW log settings */ + status = ice_get_fw_log_cfg(hw); + if (status) + return status; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging); cmd = &desc.params.fw_logging; @@ -634,17 +681,17 @@ out: */ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf) { - ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n"); - ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf, + ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n"); + ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf, le16_to_cpu(desc->datalen)); - ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n"); + ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n"); } /** * ice_get_itr_intrl_gran - determine int/intrl granularity * @hw: pointer to the HW struct * - * Determines the itr/intrl granularities based on the maximum aggregate + * Determines the ITR/intrl granularities based on the maximum aggregate * bandwidth according to the device's configuration during power-on. */ static void ice_get_itr_intrl_gran(struct ice_hw *hw) @@ -815,6 +862,10 @@ err_unroll_cqinit: /** * ice_deinit_hw - unroll initialization operations done by ice_init_hw * @hw: pointer to the hardware structure + * + * This should be called only during nominal operation, not as a result of + * ice_init_hw() failing since ice_init_hw() will take care of unrolling + * applicable initializations if it fails for any reason. */ void ice_deinit_hw(struct ice_hw *hw) { @@ -1447,6 +1498,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, struct ice_hw_func_caps *func_p = NULL; struct ice_hw_dev_caps *dev_p = NULL; struct ice_hw_common_caps *caps; + char const *prefix; u32 i; if (!buf) @@ -1457,9 +1509,11 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, if (opc == ice_aqc_opc_list_dev_caps) { dev_p = &hw->dev_caps; caps = &dev_p->common_cap; + prefix = "dev cap"; } else if (opc == ice_aqc_opc_list_func_caps) { func_p = &hw->func_caps; caps = &func_p->common_cap; + prefix = "func cap"; } else { ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n"); return; @@ -1475,28 +1529,29 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, case ICE_AQC_CAPS_VALID_FUNCTIONS: caps->valid_functions = number; ice_debug(hw, ICE_DBG_INIT, - "HW caps: Valid Functions = %d\n", + "%s: valid functions = %d\n", prefix, caps->valid_functions); break; case ICE_AQC_CAPS_SRIOV: caps->sr_iov_1_1 = (number == 1); ice_debug(hw, ICE_DBG_INIT, - "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1); + "%s: SR-IOV = %d\n", prefix, + caps->sr_iov_1_1); break; case ICE_AQC_CAPS_VF: if (dev_p) { dev_p->num_vfs_exposed = number; ice_debug(hw, ICE_DBG_INIT, - "HW caps: VFs exposed = %d\n", + "%s: VFs exposed = %d\n", prefix, dev_p->num_vfs_exposed); } else if (func_p) { func_p->num_allocd_vfs = number; func_p->vf_base_id = logical_id; ice_debug(hw, ICE_DBG_INIT, - "HW caps: VFs allocated = %d\n", + "%s: VFs allocated = %d\n", prefix, func_p->num_allocd_vfs); ice_debug(hw, ICE_DBG_INIT, - "HW caps: VF base_id = %d\n", + "%s: VF base_id = %d\n", prefix, func_p->vf_base_id); } break; @@ -1504,69 +1559,69 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, if (dev_p) { dev_p->num_vsi_allocd_to_host = number; ice_debug(hw, ICE_DBG_INIT, - "HW caps: Dev.VSI cnt = %d\n", + "%s: num VSI alloc to 
host = %d\n", + prefix, dev_p->num_vsi_allocd_to_host); } else if (func_p) { func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); ice_debug(hw, ICE_DBG_INIT, - "HW caps: Func.VSI cnt = %d\n", - number); + "%s: num guaranteed VSI (fw) = %d\n", + prefix, number); + ice_debug(hw, ICE_DBG_INIT, + "%s: num guaranteed VSI = %d\n", + prefix, func_p->guar_num_vsi); } break; case ICE_AQC_CAPS_RSS: caps->rss_table_size = number; caps->rss_table_entry_width = logical_id; ice_debug(hw, ICE_DBG_INIT, - "HW caps: RSS table size = %d\n", + "%s: RSS table size = %d\n", prefix, caps->rss_table_size); ice_debug(hw, ICE_DBG_INIT, - "HW caps: RSS table width = %d\n", + "%s: RSS table width = %d\n", prefix, caps->rss_table_entry_width); break; case ICE_AQC_CAPS_RXQS: caps->num_rxq = number; caps->rxq_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, - "HW caps: Num Rx Qs = %d\n", caps->num_rxq); + "%s: num Rx queues = %d\n", prefix, + caps->num_rxq); ice_debug(hw, ICE_DBG_INIT, - "HW caps: Rx first queue ID = %d\n", + "%s: Rx first queue ID = %d\n", prefix, caps->rxq_first_id); break; case ICE_AQC_CAPS_TXQS: caps->num_txq = number; caps->txq_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, - "HW caps: Num Tx Qs = %d\n", caps->num_txq); + "%s: num Tx queues = %d\n", prefix, + caps->num_txq); ice_debug(hw, ICE_DBG_INIT, - "HW caps: Tx first queue ID = %d\n", + "%s: Tx first queue ID = %d\n", prefix, caps->txq_first_id); break; case ICE_AQC_CAPS_MSIX: caps->num_msix_vectors = number; caps->msix_vector_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, - "HW caps: MSIX vector count = %d\n", + "%s: MSIX vector count = %d\n", prefix, caps->num_msix_vectors); ice_debug(hw, ICE_DBG_INIT, - "HW caps: MSIX first vector index = %d\n", + "%s: MSIX first vector index = %d\n", prefix, caps->msix_vector_first_id); break; case ICE_AQC_CAPS_MAX_MTU: caps->max_mtu = number; - if (dev_p) - ice_debug(hw, ICE_DBG_INIT, - "HW caps: Dev.MaxMTU = %d\n", - caps->max_mtu); - else if (func_p) - ice_debug(hw, ICE_DBG_INIT, - "HW caps: func.MaxMTU = %d\n", - caps->max_mtu); + ice_debug(hw, ICE_DBG_INIT, "%s: max MTU = %d\n", + prefix, caps->max_mtu); break; default: ice_debug(hw, ICE_DBG_INIT, - "HW caps: Unknown capability[%d]: 0x%x\n", i, - cap); + "%s: unknown capability[%d]: 0x%x\n", prefix, + i, cap); break; } } @@ -1947,36 +2002,37 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, */ enum ice_status ice_update_link_info(struct ice_port_info *pi) { - struct ice_aqc_get_phy_caps_data *pcaps; - struct ice_phy_info *phy_info; + struct ice_link_status *li; enum ice_status status; - struct ice_hw *hw; if (!pi) return ICE_ERR_PARAM; - hw = pi->hw; - - pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); - if (!pcaps) - return ICE_ERR_NO_MEMORY; + li = &pi->phy.link_info; - phy_info = &pi->phy; status = ice_aq_get_link_info(pi, true, NULL, NULL); if (status) - goto out; + return status; + + if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { + struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_hw *hw; + + hw = pi->hw; + pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), + GFP_KERNEL); + if (!pcaps) + return ICE_ERR_NO_MEMORY; - if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, NULL); - if (status) - goto out; + if (!status) + memcpy(li->module_type, &pcaps->module_type, + sizeof(li->module_type)); - memcpy(phy_info->link_info.module_type, &pcaps->module_type, - sizeof(phy_info->link_info.module_type)); + devm_kfree(ice_hw_to_dev(hw), 
pcaps); } -out: - devm_kfree(ice_hw_to_dev(hw), pcaps); + return status; } @@ -2081,6 +2137,74 @@ out: } /** + * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data + * @caps: PHY ability structure to copy date from + * @cfg: PHY configuration structure to copy data to + * + * Helper function to copy AQC PHY get ability data to PHY set configuration + * data structure + */ +void +ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps, + struct ice_aqc_set_phy_cfg_data *cfg) +{ + if (!caps || !cfg) + return; + + cfg->phy_type_low = caps->phy_type_low; + cfg->phy_type_high = caps->phy_type_high; + cfg->caps = caps->caps; + cfg->low_power_ctrl = caps->low_power_ctrl; + cfg->eee_cap = caps->eee_cap; + cfg->eeer_value = caps->eeer_value; + cfg->link_fec_opt = caps->link_fec_options; +} + +/** + * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode + * @cfg: PHY configuration data to set FEC mode + * @fec: FEC mode to configure + * + * Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC + * (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps + * ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling. + */ +void +ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) +{ + switch (fec) { + case ICE_FEC_BASER: + /* Clear auto FEC and RS bits, and AND BASE-R ability + * bits and OR request bits. + */ + cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC; + cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | + ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; + cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | + ICE_AQC_PHY_FEC_25G_KR_REQ; + break; + case ICE_FEC_RS: + /* Clear auto FEC and BASE-R bits, and AND RS ability + * bits and OR request bits. + */ + cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC; + cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; + cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | + ICE_AQC_PHY_FEC_25G_RS_544_REQ; + break; + case ICE_FEC_NONE: + /* Clear auto FEC and all FEC option bits. */ + cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC; + cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; + break; + case ICE_FEC_AUTO: + /* AND auto FEC bit, and all caps bits. 
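/* Usage sketch for the two helpers above (illustrative, not taken from
 * this patch): per the ice_cfg_phy_fec() kernel-doc, callers first mirror
 * the reported abilities into the config, then select a FEC mode, then
 * apply the result:
 *
 *	ice_copy_phy_caps_to_cfg(pcaps, &cfg);
 *	ice_cfg_phy_fec(&cfg, ICE_FEC_RS);
 *	status = ice_aq_set_phy_cfg(hw, lport, &cfg, NULL);
 */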
*/ + cfg->caps &= ICE_AQC_PHY_CAPS_MASK; + break; + } +} + +/** * ice_get_link_status - get status of the HW network link * @pi: port information structure * @link_up: pointer to bool (true/false = linkup/linkdown) @@ -2169,6 +2293,29 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, } /** + * ice_aq_set_mac_loopback + * @hw: pointer to the HW struct + * @ena_lpbk: Enable or Disable loopback + * @cd: pointer to command details structure or NULL + * + * Enable/disable loopback on a given port + */ +enum ice_status +ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) +{ + struct ice_aqc_set_mac_lb *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.set_mac_lb; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); + if (ena_lpbk) + cmd->lb_mode = ICE_AQ_MAC_LB_EN; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** * ice_aq_set_port_id_led * @pi: pointer to the port information * @is_orig_mode: is this LED set to original mode (by the net-list) @@ -2552,7 +2699,7 @@ do_aq: ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", vmvf_num, hw->adminq.sq_last_status); else - ice_debug(hw, ICE_DBG_SCHED, "disable Q %d failed %d\n", + ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", le16_to_cpu(qg_list[0].q_id[0]), hw->adminq.sq_last_status); } @@ -2924,7 +3071,6 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return ICE_ERR_CFG; - if (!num_queues) { /* if queue is disabled already yet the disable queue command * has to be sent to complete the VF reset, then call diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index f1ddebf45231..d1f8353fe6bb 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -9,6 +9,8 @@ #include "ice_switch.h" #include <linux/avf/virtchnl.h> +enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw); + void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len); enum ice_status ice_init_hw(struct ice_hw *hw); @@ -84,7 +86,11 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, enum ice_status ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update); - +void +ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec); +void +ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps, + struct ice_aqc_set_phy_cfg_data *cfg); enum ice_status ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd); @@ -95,6 +101,9 @@ enum ice_status ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, struct ice_sq_cd *cd); enum ice_status +ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd); + +enum ice_status ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, struct ice_sq_cd *cd); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index cc8cb5fdcdc1..e91ac4df0242 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -439,7 +439,7 @@ do { \ /* free the buffer info list */ \ if ((qi)->ring.cmd_buf) \ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ - /* free dma head */ \ + /* free DMA head */ \ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \ } while (0) diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h 
b/drivers/net/ethernet/intel/ice/ice_controlq.h index e0585394d984..44945c2165d8 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -35,7 +35,7 @@ enum ice_ctl_q { #define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */ struct ice_ctl_q_ring { - void *dma_head; /* Virtual address to dma head */ + void *dma_head; /* Virtual address to DMA head */ struct ice_dma_mem desc_buf; /* descriptor ring memory */ void *cmd_buf; /* command buffer memory */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 8bbf48e04a1c..c2002ded65f6 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -82,12 +82,14 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, * @hw: pointer to the HW struct * @shutdown_lldp_agent: True if LLDP Agent needs to be Shutdown * False if LLDP Agent needs to be Stopped + * @persist: True if Stop/Shutdown of LLDP Agent needs to be persistent across + * reboots * @cd: pointer to command details structure or NULL * * Stop or Shutdown the embedded LLDP Agent (0x0A05) */ enum ice_status -ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, +ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, struct ice_sq_cd *cd) { struct ice_aqc_lldp_stop *cmd; @@ -100,17 +102,22 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, if (shutdown_lldp_agent) cmd->command |= ICE_AQ_LLDP_AGENT_SHUTDOWN; + if (persist) + cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_DIS; + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_start_lldp * @hw: pointer to the HW struct + * @persist: True if Start of LLDP Agent needs to be persistent across reboots * @cd: pointer to command details structure or NULL * * Start the embedded LLDP Agent on all ports. 
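/* With the new 'persist' argument, a stop or start of the FW LLDP agent
 * can now be made to survive reboots (ICE_AQ_LLDP_AGENT_PERSIST_DIS/ENA).
 * Illustrative calls under the extended signatures, not from this hunk:
 *
 *	ice_aq_stop_lldp(hw, true, true, NULL);	 shutdown, persistent
 *	ice_aq_start_lldp(hw, true, NULL);	 start, persistent
 */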
(0x0A06) */ -enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd) +enum ice_status +ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd) { struct ice_aqc_lldp_start *cmd; struct ice_aq_desc desc; @@ -121,6 +128,9 @@ enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd) cmd->command = ICE_AQ_LLDP_AGENT_START; + if (persist) + cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_ENA; + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } @@ -163,7 +173,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, * * Get the DCBX status from the Firmware */ -u8 ice_get_dcbx_status(struct ice_hw *hw) +static u8 ice_get_dcbx_status(struct ice_hw *hw) { u32 reg; @@ -614,7 +624,8 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) * * Parse DCB configuration from the LLDPDU */ -enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) +static enum ice_status +ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) { struct ice_lldp_org_tlv *tlv; enum ice_status ret = 0; @@ -658,13 +669,13 @@ enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) /** * ice_aq_get_dcb_cfg * @hw: pointer to the HW struct - * @mib_type: mib type for the query + * @mib_type: MIB type for the query * @bridgetype: bridge type for the query (remote) * @dcbcfg: store for LLDPDU data * * Query DCB configuration from the firmware */ -static enum ice_status +enum ice_status ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg) { @@ -689,13 +700,13 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, } /** - * ice_aq_start_stop_dcbx - Start/Stop DCBx service in FW + * ice_aq_start_stop_dcbx - Start/Stop DCBX service in FW * @hw: pointer to the HW struct - * @start_dcbx_agent: True if DCBx Agent needs to be started - * False if DCBx Agent needs to be stopped - * @dcbx_agent_status: FW indicates back the DCBx agent status - * True if DCBx Agent is active - * False if DCBx Agent is stopped + * @start_dcbx_agent: True if DCBX Agent needs to be started + * False if DCBX Agent needs to be stopped + * @dcbx_agent_status: FW indicates back the DCBX agent status + * True if DCBX Agent is active + * False if DCBX Agent is stopped * @cd: pointer to command details structure or NULL * * Start/Stop the embedded dcbx Agent. 
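/* ice_aq_get_dcb_cfg() is promoted from static to a shared helper here;
 * the MIB-change handler later in this patch uses it to refresh the
 * cached peer configuration:
 *
 *	ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
 *				 ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
 *				 &pi->remote_dcbx_cfg);
 */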
In case that this wrapper function diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h index e7d4416e3a66..522e1452abe2 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb.h @@ -120,8 +120,9 @@ struct ice_cee_app_prio { u8 prio_map; } __packed; -u8 ice_get_dcbx_status(struct ice_hw *hw); -enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg); +enum ice_status +ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, + struct ice_dcbx_cfg *dcbcfg); enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi); enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi); enum ice_status ice_init_dcb(struct ice_hw *hw); @@ -131,9 +132,10 @@ ice_query_port_ets(struct ice_port_info *pi, struct ice_sq_cd *cmd_details); #ifdef CONFIG_DCB enum ice_status -ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, +ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, struct ice_sq_cd *cd); -enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd); +enum ice_status +ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd); enum ice_status ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, bool *dcbx_agent_status, struct ice_sq_cd *cd); @@ -144,6 +146,7 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, static inline enum ice_status ice_aq_stop_lldp(struct ice_hw __always_unused *hw, bool __always_unused shutdown_lldp_agent, + bool __always_unused persist, struct ice_sq_cd __always_unused *cd) { return 0; @@ -151,6 +154,7 @@ ice_aq_stop_lldp(struct ice_hw __always_unused *hw, static inline enum ice_status ice_aq_start_lldp(struct ice_hw __always_unused *hw, + bool __always_unused persist, struct ice_sq_cd __always_unused *cd) { return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 3e81af1884fc..fe88b127ca42 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -120,12 +120,14 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf) tc_map = ICE_DFLT_TRAFFIC_CLASS; ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map); - if (ret) + if (ret) { dev_err(&pf->pdev->dev, "Failed to config TC for VSI index: %d\n", pf->vsi[v]->idx); - else - ice_vsi_map_rings_to_vectors(pf->vsi[v]); + continue; + } + + ice_vsi_map_rings_to_vectors(pf->vsi[v]); } } @@ -133,8 +135,10 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf) * ice_pf_dcb_cfg - Apply new DCB configuration * @pf: pointer to the PF struct * @new_cfg: DCBX config to apply + * @locked: is the RTNL held */ -static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg) +static +int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) { struct ice_dcbx_cfg *old_cfg, *curr_cfg; struct ice_aqc_port_ets_elem buf = { 0 }; @@ -163,7 +167,8 @@ static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg) /* avoid race conditions by holding the lock while disabling and * re-enabling the VSI */ - rtnl_lock(); + if (!locked) + rtnl_lock(); ice_pf_dis_all_vsi(pf, true); memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg)); @@ -192,7 +197,8 @@ static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg) out: ice_pf_ena_all_vsi(pf, true); - rtnl_unlock(); + if (!locked) + rtnl_unlock(); devm_kfree(&pf->pdev->dev, old_cfg); return ret; } @@ -271,15 +277,16 @@ dcb_error: prev_cfg->etscfg.tcbwtable[0] = 
ICE_TC_MAX_BW; prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS; memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec)); - ice_pf_dcb_cfg(pf, prev_cfg); + ice_pf_dcb_cfg(pf, prev_cfg, false); devm_kfree(&pf->pdev->dev, prev_cfg); } /** * ice_dcb_init_cfg - set the initial DCB config in SW - * @pf: pf to apply config to + * @pf: PF to apply config to + * @locked: Is the RTNL held */ -static int ice_dcb_init_cfg(struct ice_pf *pf) +static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked) { struct ice_dcbx_cfg *newcfg; struct ice_port_info *pi; @@ -294,7 +301,7 @@ static int ice_dcb_init_cfg(struct ice_pf *pf) memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg)); dev_info(&pf->pdev->dev, "Configuring initial DCB values\n"); - if (ice_pf_dcb_cfg(pf, newcfg)) + if (ice_pf_dcb_cfg(pf, newcfg, locked)) ret = -EINVAL; devm_kfree(&pf->pdev->dev, newcfg); @@ -304,9 +311,10 @@ static int ice_dcb_init_cfg(struct ice_pf *pf) /** * ice_dcb_sw_default_config - Apply a default DCB config - * @pf: pf to apply config to + * @pf: PF to apply config to + * @locked: was this function called with RTNL held */ -static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf) +static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked) { struct ice_aqc_port_ets_elem buf = { 0 }; struct ice_dcbx_cfg *dcbcfg; @@ -338,7 +346,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf) dcbcfg->app[0].priority = 3; dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE; - ret = ice_pf_dcb_cfg(pf, dcbcfg); + ret = ice_pf_dcb_cfg(pf, dcbcfg, locked); devm_kfree(&pf->pdev->dev, dcbcfg); if (ret) return ret; @@ -348,9 +356,10 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf) /** * ice_init_pf_dcb - initialize DCB for a PF - * @pf: pf to initiialize DCB for + * @pf: PF to initialize DCB for + * @locked: Was function called with RTNL held */ -int ice_init_pf_dcb(struct ice_pf *pf) +int ice_init_pf_dcb(struct ice_pf *pf, bool locked) { struct device *dev = &pf->pdev->dev; struct ice_port_info *port_info; @@ -360,33 +369,10 @@ int ice_init_pf_dcb(struct ice_pf *pf) port_info = hw->port_info; - /* check if device is DCB capable */ - if (!hw->func_caps.common_cap.dcb) { - dev_dbg(dev, "DCB not supported\n"); - return -EOPNOTSUPP; - } - - /* Best effort to put DCBx and LLDP into a good state */ - port_info->dcbx_status = ice_get_dcbx_status(hw); - if (port_info->dcbx_status != ICE_DCBX_STATUS_DONE && - port_info->dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) { - bool dcbx_status; - - /* Attempt to start LLDP engine. Ignore errors - * as this will error if it is already started - */ - ice_aq_start_lldp(hw, NULL); - - /* Attempt to start DCBX. 
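/* The 'locked' parameter threaded through this file encodes whether the
 * caller already holds RTNL, so the DCB configuration paths can be entered
 * both from ethtool context (RTNL held) and from init context (not held).
 * The shape of the pattern, with a hypothetical helper name:
 */
static int ice_dcb_reconfig_sketch(struct ice_pf *pf, bool locked)
{
	int err;

	if (!locked)
		rtnl_lock();
	err = ice_dcb_apply(pf);	/* hypothetical worker */
	if (!locked)
		rtnl_unlock();
	return err;
}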
Ignore errors as this - * will error if it is already started - */ - ice_aq_start_stop_dcbx(hw, true, &dcbx_status, NULL); - } - err = ice_init_dcb(hw); if (err) { - /* FW LLDP not in usable state, default to SW DCBx/LLDP */ - dev_info(&pf->pdev->dev, "FW LLDP not in usable state\n"); + /* FW LLDP is not active, default to SW DCBX/LLDP */ + dev_info(&pf->pdev->dev, "FW LLDP is not active\n"); hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED; hw->port_info->is_sw_lldp = true; } @@ -398,15 +384,16 @@ int ice_init_pf_dcb(struct ice_pf *pf) if (port_info->is_sw_lldp) { sw_default = 1; dev_info(&pf->pdev->dev, "DCBx/LLDP in SW mode.\n"); + clear_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags); + } else { + set_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags); } - if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) { - sw_default = 1; + if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) dev_info(&pf->pdev->dev, "DCBX not started\n"); - } if (sw_default) { - err = ice_dcb_sw_dflt_cfg(pf); + err = ice_dcb_sw_dflt_cfg(pf, locked); if (err) { dev_err(&pf->pdev->dev, "Failed to set local DCB config %d\n", err); @@ -425,7 +412,7 @@ int ice_init_pf_dcb(struct ice_pf *pf) set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); - err = ice_dcb_init_cfg(pf); + err = ice_dcb_init_cfg(pf, locked); if (err) goto dcb_init_err; @@ -515,6 +502,55 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring, } /** + * ice_dcb_need_recfg - Check if DCB needs reconfig + * @pf: board private structure + * @old_cfg: current DCB config + * @new_cfg: new DCB config + */ +static bool ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, + struct ice_dcbx_cfg *new_cfg) +{ + bool need_reconfig = false; + + /* Check if ETS configuration has changed */ + if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg, + sizeof(new_cfg->etscfg))) { + /* If Priority Table has changed reconfig is needed */ + if (memcmp(&new_cfg->etscfg.prio_table, + &old_cfg->etscfg.prio_table, + sizeof(new_cfg->etscfg.prio_table))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); + } + + if (memcmp(&new_cfg->etscfg.tcbwtable, + &old_cfg->etscfg.tcbwtable, + sizeof(new_cfg->etscfg.tcbwtable))) + dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); + + if (memcmp(&new_cfg->etscfg.tsatable, + &old_cfg->etscfg.tsatable, + sizeof(new_cfg->etscfg.tsatable))) + dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); + } + + /* Check if PFC configuration has changed */ + if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); + } + + /* Check if APP Table has changed */ + if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); + } + + dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); + return need_reconfig; +} + +/** * ice_dcb_process_lldp_set_mib_change - Process MIB change * @pf: ptr to ice_pf * @event: pointer to the admin queue receive event @@ -523,29 +559,95 @@ void ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { - if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) { - struct ice_dcbx_cfg *dcbcfg, *prev_cfg; - int err; - - prev_cfg = &pf->hw.port_info->local_dcbx_cfg; - dcbcfg = devm_kmemdup(&pf->pdev->dev, prev_cfg, - sizeof(*dcbcfg), GFP_KERNEL); - if (!dcbcfg) + struct ice_aqc_port_ets_elem buf = { 0 }; + struct ice_aqc_lldp_get_mib *mib; + struct ice_dcbx_cfg tmp_dcbx_cfg; + bool need_reconfig 
= false; + struct ice_port_info *pi; + u8 type; + int ret; + + /* Not DCB capable or capability disabled */ + if (!(test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))) + return; + + if (pf->dcbx_cap & DCB_CAP_DCBX_HOST) { + dev_dbg(&pf->pdev->dev, + "MIB Change Event in HOST mode\n"); + return; + } + + pi = pf->hw.port_info; + mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw; + /* Ignore if event is not for Nearest Bridge */ + type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) & + ICE_AQ_LLDP_BRID_TYPE_M); + dev_dbg(&pf->pdev->dev, "LLDP event MIB bridge type 0x%x\n", type); + if (type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID) + return; + + /* Check MIB Type and return if event for Remote MIB update */ + type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M; + dev_dbg(&pf->pdev->dev, + "LLDP event mib type %s\n", type ? "remote" : "local"); + if (type == ICE_AQ_LLDP_MIB_REMOTE) { + /* Update the remote cached instance and return */ + ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE, + ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, + &pi->remote_dcbx_cfg); + if (ret) { + dev_err(&pf->pdev->dev, "Failed to get remote DCB config\n"); return; + } + } - err = ice_lldp_to_dcb_cfg(event->msg_buf, dcbcfg); - if (!err) - ice_pf_dcb_cfg(pf, dcbcfg); + /* store the old configuration */ + tmp_dcbx_cfg = pf->hw.port_info->local_dcbx_cfg; - devm_kfree(&pf->pdev->dev, dcbcfg); + /* Reset the old DCBX configuration data */ + memset(&pi->local_dcbx_cfg, 0, sizeof(pi->local_dcbx_cfg)); - /* Get updated DCBx data from firmware */ - err = ice_get_dcb_cfg(pf->hw.port_info); - if (err) - dev_err(&pf->pdev->dev, - "Failed to get DCB config\n"); - } else { + /* Get updated DCBX data from firmware */ + ret = ice_get_dcb_cfg(pf->hw.port_info); + if (ret) { + dev_err(&pf->pdev->dev, "Failed to get DCB config\n"); + return; + } + + /* No change detected in DCBX configs */ + if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) { dev_dbg(&pf->pdev->dev, - "MIB Change Event in HOST mode\n"); + "No change detected in DCBX configuration.\n"); + return; + } + + need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg, + &pi->local_dcbx_cfg); + if (!need_reconfig) + return; + + /* Enable DCB tagging only when more than one TC */ + if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) { + dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n"); + set_bit(ICE_FLAG_DCB_ENA, pf->flags); + } else { + dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n"); + clear_bit(ICE_FLAG_DCB_ENA, pf->flags); } + + rtnl_lock(); + ice_pf_dis_all_vsi(pf, true); + + ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); + if (ret) { + dev_err(&pf->pdev->dev, "Query Port ETS failed\n"); + rtnl_unlock(); + return; + } + + /* changes in configuration update VSI */ + ice_pf_dcb_recfg(pf); + + ice_pf_ena_all_vsi(pf, true); + rtnl_unlock(); } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index ca7b76faa03c..819081053ff5 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -14,7 +14,7 @@ void ice_dcb_rebuild(struct ice_pf *pf); u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg); u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg); void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi); -int ice_init_pf_dcb(struct ice_pf *pf); +int ice_init_pf_dcb(struct ice_pf *pf, bool locked); void ice_update_dcb_stats(struct ice_pf *pf); int ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring, @@ -40,7 +40,8 @@ static inline u8 
ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg) return 1; } -static inline int ice_init_pf_dcb(struct ice_pf *pf) +static inline int +ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked) { dev_dbg(&pf->pdev->dev, "DCB not supported\n"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 1341fde8d53f..52083a63dee6 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -45,22 +45,40 @@ static int ice_q_stats_len(struct net_device *netdev) ICE_VSI_STATS_LEN + ice_q_stats_len(n)) static const struct ice_stats ice_gstrings_vsi_stats[] = { - ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast), ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast), - ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast), + ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast), ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast), - ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast), + ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast), ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast), - ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes), + ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast), ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes), - ICE_VSI_STAT("rx_discards", eth_stats.rx_discards), - ICE_VSI_STAT("tx_errors", eth_stats.tx_errors), - ICE_VSI_STAT("tx_linearize", tx_linearize), + ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes), + ICE_VSI_STAT("rx_dropped", eth_stats.rx_discards), ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed), ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), + ICE_VSI_STAT("tx_errors", eth_stats.tx_errors), + ICE_VSI_STAT("tx_linearize", tx_linearize), +}; + +enum ice_ethtool_test_id { + ICE_ETH_TEST_REG = 0, + ICE_ETH_TEST_EEPROM, + ICE_ETH_TEST_INTR, + ICE_ETH_TEST_LOOP, + ICE_ETH_TEST_LINK, }; +static const char ice_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", + "EEPROM test (offline)", + "Interrupt test (offline)", + "Loopback test (offline)", + "Link test (on/offline)", +}; + +#define ICE_TEST_LEN (sizeof(ice_gstrings_test) / ETH_GSTRING_LEN) + /* These PF_STATs might look like duplicates of some NETDEV_STATs, * but they aren't. This device is capable of supporting multiple * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual @@ -71,45 +89,45 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = { * is queried on the base PF netdev. 
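* For example (hypothetical session), 'ethtool -S <iface>' would list both * rx_bytes for the netdev's own VSI and rx_bytes.nic for the whole port; * only the .nic counters aggregate traffic from every VSI on the port.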
*/ static const struct ice_stats ice_gstrings_pf_stats[] = { - ICE_PF_STAT("port.tx_bytes", stats.eth.tx_bytes), - ICE_PF_STAT("port.rx_bytes", stats.eth.rx_bytes), - ICE_PF_STAT("port.tx_unicast", stats.eth.tx_unicast), - ICE_PF_STAT("port.rx_unicast", stats.eth.rx_unicast), - ICE_PF_STAT("port.tx_multicast", stats.eth.tx_multicast), - ICE_PF_STAT("port.rx_multicast", stats.eth.rx_multicast), - ICE_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast), - ICE_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast), - ICE_PF_STAT("port.tx_errors", stats.eth.tx_errors), - ICE_PF_STAT("port.tx_size_64", stats.tx_size_64), - ICE_PF_STAT("port.rx_size_64", stats.rx_size_64), - ICE_PF_STAT("port.tx_size_127", stats.tx_size_127), - ICE_PF_STAT("port.rx_size_127", stats.rx_size_127), - ICE_PF_STAT("port.tx_size_255", stats.tx_size_255), - ICE_PF_STAT("port.rx_size_255", stats.rx_size_255), - ICE_PF_STAT("port.tx_size_511", stats.tx_size_511), - ICE_PF_STAT("port.rx_size_511", stats.rx_size_511), - ICE_PF_STAT("port.tx_size_1023", stats.tx_size_1023), - ICE_PF_STAT("port.rx_size_1023", stats.rx_size_1023), - ICE_PF_STAT("port.tx_size_1522", stats.tx_size_1522), - ICE_PF_STAT("port.rx_size_1522", stats.rx_size_1522), - ICE_PF_STAT("port.tx_size_big", stats.tx_size_big), - ICE_PF_STAT("port.rx_size_big", stats.rx_size_big), - ICE_PF_STAT("port.link_xon_tx", stats.link_xon_tx), - ICE_PF_STAT("port.link_xon_rx", stats.link_xon_rx), - ICE_PF_STAT("port.link_xoff_tx", stats.link_xoff_tx), - ICE_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx), - ICE_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down), - ICE_PF_STAT("port.rx_undersize", stats.rx_undersize), - ICE_PF_STAT("port.rx_fragments", stats.rx_fragments), - ICE_PF_STAT("port.rx_oversize", stats.rx_oversize), - ICE_PF_STAT("port.rx_jabber", stats.rx_jabber), - ICE_PF_STAT("port.rx_csum_bad", hw_csum_rx_error), - ICE_PF_STAT("port.rx_length_errors", stats.rx_len_errors), - ICE_PF_STAT("port.rx_dropped", stats.eth.rx_discards), - ICE_PF_STAT("port.rx_crc_errors", stats.crc_errors), - ICE_PF_STAT("port.illegal_bytes", stats.illegal_bytes), - ICE_PF_STAT("port.mac_local_faults", stats.mac_local_faults), - ICE_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults), + ICE_PF_STAT("rx_bytes.nic", stats.eth.rx_bytes), + ICE_PF_STAT("tx_bytes.nic", stats.eth.tx_bytes), + ICE_PF_STAT("rx_unicast.nic", stats.eth.rx_unicast), + ICE_PF_STAT("tx_unicast.nic", stats.eth.tx_unicast), + ICE_PF_STAT("rx_multicast.nic", stats.eth.rx_multicast), + ICE_PF_STAT("tx_multicast.nic", stats.eth.tx_multicast), + ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast), + ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast), + ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors), + ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64), + ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64), + ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127), + ICE_PF_STAT("tx_size_127.nic", stats.tx_size_127), + ICE_PF_STAT("rx_size_255.nic", stats.rx_size_255), + ICE_PF_STAT("tx_size_255.nic", stats.tx_size_255), + ICE_PF_STAT("rx_size_511.nic", stats.rx_size_511), + ICE_PF_STAT("tx_size_511.nic", stats.tx_size_511), + ICE_PF_STAT("rx_size_1023.nic", stats.rx_size_1023), + ICE_PF_STAT("tx_size_1023.nic", stats.tx_size_1023), + ICE_PF_STAT("rx_size_1522.nic", stats.rx_size_1522), + ICE_PF_STAT("tx_size_1522.nic", stats.tx_size_1522), + ICE_PF_STAT("rx_size_big.nic", stats.rx_size_big), + ICE_PF_STAT("tx_size_big.nic", stats.tx_size_big), + ICE_PF_STAT("link_xon_rx.nic", stats.link_xon_rx), + 
ICE_PF_STAT("link_xon_tx.nic", stats.link_xon_tx), + ICE_PF_STAT("link_xoff_rx.nic", stats.link_xoff_rx), + ICE_PF_STAT("link_xoff_tx.nic", stats.link_xoff_tx), + ICE_PF_STAT("tx_dropped_link_down.nic", stats.tx_dropped_link_down), + ICE_PF_STAT("rx_undersize.nic", stats.rx_undersize), + ICE_PF_STAT("rx_fragments.nic", stats.rx_fragments), + ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize), + ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber), + ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error), + ICE_PF_STAT("rx_length_errors.nic", stats.rx_len_errors), + ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards), + ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors), + ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes), + ICE_PF_STAT("mac_local_faults.nic", stats.mac_local_faults), + ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults), }; static const u32 ice_regs_dump_list[] = { @@ -120,6 +138,9 @@ static const u32 ice_regs_dump_list[] = { QINT_RQCTL(0), PFINT_OICR_ENA, QRX_ITR(0), + PF0INT_ITR_0(0), + PF0INT_ITR_1(0), + PF0INT_ITR_2(0), }; struct ice_priv_flag { @@ -134,7 +155,7 @@ struct ice_priv_flag { static const struct ice_priv_flag ice_gstrings_priv_flags[] = { ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA), - ICE_PRIV_FLAG("disable-fw-lldp", ICE_FLAG_DISABLE_FW_LLDP), + ICE_PRIV_FLAG("enable-fw-lldp", ICE_FLAG_ENABLE_FW_LLDP), }; #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) @@ -278,6 +299,571 @@ out: return ret; } +/** + * ice_active_vfs - check if there are any active VFs + * @pf: board private structure + * + * Returns true if an active VF is found, otherwise returns false + */ +static bool ice_active_vfs(struct ice_pf *pf) +{ + struct ice_vf *vf = pf->vf; + int i; + + for (i = 0; i < pf->num_alloc_vfs; i++, vf++) + if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + return true; + return false; +} + +/** + * ice_link_test - perform a link test on a given net_device + * @netdev: network interface device structure + * + * This function performs one of the self-tests required by ethtool. + * Returns 0 on success, non-zero on failure. + */ +static u64 ice_link_test(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + enum ice_status status; + bool link_up = false; + + netdev_info(netdev, "link test\n"); + status = ice_get_link_status(np->vsi->port_info, &link_up); + if (status) { + netdev_err(netdev, "link query error, status = %d\n", status); + return 1; + } + + if (!link_up) + return 2; + + return 0; +} + +/** + * ice_eeprom_test - perform an EEPROM test on a given net_device + * @netdev: network interface device structure + * + * This function performs one of the self-tests required by ethtool. + * Returns 0 on success, non-zero on failure. 
+ */ +static u64 ice_eeprom_test(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + + netdev_info(netdev, "EEPROM test\n"); + return !!(ice_nvm_validate_checksum(&pf->hw)); +} + +/** + * ice_reg_pattern_test + * @hw: pointer to the HW struct + * @reg: reg to be tested + * @mask: bits to be touched + */ +static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask) +{ + struct ice_pf *pf = (struct ice_pf *)hw->back; + static const u32 patterns[] = { + 0x5A5A5A5A, 0xA5A5A5A5, + 0x00000000, 0xFFFFFFFF + }; + u32 val, orig_val; + int i; + + orig_val = rd32(hw, reg); + for (i = 0; i < ARRAY_SIZE(patterns); ++i) { + u32 pattern = patterns[i] & mask; + + wr32(hw, reg, pattern); + val = rd32(hw, reg); + if (val == pattern) + continue; + dev_err(&pf->pdev->dev, + "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n", + __func__, reg, pattern, val); + return 1; + } + + wr32(hw, reg, orig_val); + val = rd32(hw, reg); + if (val != orig_val) { + dev_err(&pf->pdev->dev, + "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n", + __func__, reg, orig_val, val); + return 1; + } + + return 0; +} + +/** + * ice_reg_test - perform a register test on a given net_device + * @netdev: network interface device structure + * + * This function performs one of the self-tests required by ethtool. + * Returns 0 on success, non-zero on failure. + */ +static u64 ice_reg_test(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_hw *hw = np->vsi->port_info->hw; + u32 int_elements = hw->func_caps.common_cap.num_msix_vectors ? + hw->func_caps.common_cap.num_msix_vectors - 1 : 1; + struct ice_diag_reg_test_info { + u32 address; + u32 mask; + u32 elem_num; + u32 elem_size; + } ice_reg_list[] = { + {GLINT_ITR(0, 0), 0x00000fff, int_elements, + GLINT_ITR(0, 1) - GLINT_ITR(0, 0)}, + {GLINT_ITR(1, 0), 0x00000fff, int_elements, + GLINT_ITR(1, 1) - GLINT_ITR(1, 0)}, + {GLINT_ITR(2, 0), 0x00000fff, int_elements, + GLINT_ITR(2, 1) - GLINT_ITR(2, 0)}, + {GLINT_CTL, 0xffff0001, 1, 0} + }; + int i; + + netdev_dbg(netdev, "Register test\n"); + for (i = 0; i < ARRAY_SIZE(ice_reg_list); ++i) { + u32 j; + + for (j = 0; j < ice_reg_list[i].elem_num; ++j) { + u32 mask = ice_reg_list[i].mask; + u32 reg = ice_reg_list[i].address + + (j * ice_reg_list[i].elem_size); + + /* bail on failure (non-zero return) */ + if (ice_reg_pattern_test(hw, reg, mask)) + return 1; + } + } + + return 0; +} + +/** + * ice_lbtest_prepare_rings - configure Tx/Rx test rings + * @vsi: pointer to the VSI structure + * + * Function configures rings of a VSI for loopback test without + * enabling interrupts or informing the kernel about new queues. + * + * Returns 0 on success, negative on failure.
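+ * The error unwind below mirrors the setup order: a failure after the + * Rx rings are configured frees them, then the already-started Tx rings + * are stopped and freed.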
+ */ +static int ice_lbtest_prepare_rings(struct ice_vsi *vsi) +{ + int status; + + status = ice_vsi_setup_tx_rings(vsi); + if (status) + goto err_setup_tx_ring; + + status = ice_vsi_setup_rx_rings(vsi); + if (status) + goto err_setup_rx_ring; + + status = ice_vsi_cfg(vsi); + if (status) + goto err_setup_rx_ring; + + status = ice_vsi_start_rx_rings(vsi); + if (status) + goto err_start_rx_ring; + + return status; + +err_start_rx_ring: + ice_vsi_free_rx_rings(vsi); +err_setup_rx_ring: + ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); +err_setup_tx_ring: + ice_vsi_free_tx_rings(vsi); + + return status; +} + +/** + * ice_lbtest_disable_rings - disable Tx/Rx test rings after loopback test + * @vsi: pointer to the VSI structure + * + * Function stops and frees VSI rings after a loopback test. + * Returns 0 on success, negative on failure. + */ +static int ice_lbtest_disable_rings(struct ice_vsi *vsi) +{ + int status; + + status = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); + if (status) + netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n", + vsi->vsi_num, status); + + status = ice_vsi_stop_rx_rings(vsi); + if (status) + netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n", + vsi->vsi_num, status); + + ice_vsi_free_tx_rings(vsi); + ice_vsi_free_rx_rings(vsi); + + return status; +} + +/** + * ice_lbtest_create_frame - create test packet + * @pf: pointer to the PF structure + * @ret_data: allocated frame buffer + * @size: size of the packet data + * + * Function allocates a frame with a test pattern on specific offsets. + * Returns 0 on success, non-zero on failure. + */ +static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size) +{ + u8 *data; + + if (!pf) + return -EINVAL; + + data = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* Since the ethernet test frame should always be at least + * 64 bytes long, fill some octets in the payload with test data. + */ + memset(data, 0xFF, size); + data[32] = 0xDE; + data[42] = 0xAD; + data[44] = 0xBE; + data[46] = 0xEF; + + *ret_data = data; + + return 0; +} + +/** + * ice_lbtest_check_frame - verify received loopback frame + * @frame: pointer to the raw packet data + * + * Function verifies received test frame with a pattern. + * Returns true if frame matches the pattern, false otherwise. + */ +static bool ice_lbtest_check_frame(u8 *frame) +{ + /* Validate bytes of a frame under offsets chosen earlier */ + if (frame[32] == 0xDE && + frame[42] == 0xAD && + frame[44] == 0xBE && + frame[46] == 0xEF && + frame[48] == 0xFF) + return true; + + return false; +} + +/** + * ice_diag_send - send test frames to the test ring + * @tx_ring: pointer to the transmit ring + * @data: pointer to the raw packet data + * @size: size of the packet to send + * + * Function sends loopback packets on a test Tx ring. 
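+ * Each frame goes out as a single descriptor with the EOP and RS bits + * set (td_cmd below), so the hardware treats it as a complete packet + * and writes back its descriptor status.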
+ */ +static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size) +{ + struct ice_tx_desc *tx_desc; + struct ice_tx_buf *tx_buf; + dma_addr_t dma; + u64 td_cmd; + + tx_desc = ICE_TX_DESC(tx_ring, tx_ring->next_to_use); + tx_buf = &tx_ring->tx_buf[tx_ring->next_to_use]; + + dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + return -EINVAL; + + tx_desc->buf_addr = cpu_to_le64(dma); + + /* These flags are required for a descriptor to be pushed out */ + td_cmd = (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS); + tx_desc->cmd_type_offset_bsz = + cpu_to_le64(ICE_TX_DESC_DTYPE_DATA | + (td_cmd << ICE_TXD_QW1_CMD_S) | + ((u64)0 << ICE_TXD_QW1_OFFSET_S) | + ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) | + ((u64)0 << ICE_TXD_QW1_L2TAG1_S)); + + tx_buf->next_to_watch = tx_desc; + + /* Force memory write to complete before letting h/w know + * there are new descriptors to fetch. + */ + wmb(); + + tx_ring->next_to_use++; + if (tx_ring->next_to_use >= tx_ring->count) + tx_ring->next_to_use = 0; + + writel_relaxed(tx_ring->next_to_use, tx_ring->tail); + + /* Wait until the packets get transmitted to the receive queue. */ + usleep_range(1000, 2000); + dma_unmap_single(tx_ring->dev, dma, size, DMA_TO_DEVICE); + + return 0; +} + +#define ICE_LB_FRAME_SIZE 64 +/** + * ice_lbtest_receive_frames - receive and verify test frames + * @rx_ring: pointer to the receive ring + * + * Function receives loopback packets and verifies their correctness. + * Returns number of received valid frames. + */ +static int ice_lbtest_receive_frames(struct ice_ring *rx_ring) +{ + struct ice_rx_buf *rx_buf; + int valid_frames, i; + u8 *received_buf; + + valid_frames = 0; + + for (i = 0; i < rx_ring->count; i++) { + union ice_32b_rx_flex_desc *rx_desc; + + rx_desc = ICE_RX_DESC(rx_ring, i); + + if (!(rx_desc->wb.status_error0 & + cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS))) + continue; + + rx_buf = &rx_ring->rx_buf[i]; + received_buf = page_address(rx_buf->page); + + if (ice_lbtest_check_frame(received_buf)) + valid_frames++; + } + + return valid_frames; +} + +/** + * ice_loopback_test - perform a loopback test on a given net_device + * @netdev: network interface device structure + * + * This function performs one of the self-tests required by ethtool. + * Returns 0 on success, non-zero on failure.
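+ * Rough sequence: create a dedicated loopback VSI, prepare its rings, + * enable MAC loopback in firmware, add a broadcast filter, send up to + * 32 test frames, then count how many come back intact on the Rx ring.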
+ */ +static u64 ice_loopback_test(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *orig_vsi = np->vsi, *test_vsi; + struct ice_pf *pf = orig_vsi->back; + struct ice_ring *tx_ring, *rx_ring; + u8 broadcast[ETH_ALEN], ret = 0; + int num_frames, valid_frames; + LIST_HEAD(tmp_list); + u8 *tx_frame; + int i; + + netdev_info(netdev, "loopback test\n"); + + test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info); + if (!test_vsi) { + netdev_err(netdev, "Failed to create a VSI for the loopback test"); + return 1; + } + + test_vsi->netdev = netdev; + tx_ring = test_vsi->tx_rings[0]; + rx_ring = test_vsi->rx_rings[0]; + + if (ice_lbtest_prepare_rings(test_vsi)) { + ret = 2; + goto lbtest_vsi_close; + } + + if (ice_alloc_rx_bufs(rx_ring, rx_ring->count)) { + ret = 3; + goto lbtest_rings_dis; + } + + /* Enable MAC loopback in firmware */ + if (ice_aq_set_mac_loopback(&pf->hw, true, NULL)) { + ret = 4; + goto lbtest_mac_dis; + } + + /* Test VSI needs to receive broadcast packets */ + eth_broadcast_addr(broadcast); + if (ice_add_mac_to_list(test_vsi, &tmp_list, broadcast)) { + ret = 5; + goto lbtest_mac_dis; + } + + if (ice_add_mac(&pf->hw, &tmp_list)) { + ret = 6; + goto free_mac_list; + } + + if (ice_lbtest_create_frame(pf, &tx_frame, ICE_LB_FRAME_SIZE)) { + ret = 7; + goto remove_mac_filters; + } + + num_frames = min_t(int, tx_ring->count, 32); + for (i = 0; i < num_frames; i++) { + if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) { + ret = 8; + goto lbtest_free_frame; + } + } + + valid_frames = ice_lbtest_receive_frames(rx_ring); + if (!valid_frames) + ret = 9; + else if (valid_frames != num_frames) + ret = 10; + +lbtest_free_frame: + devm_kfree(&pf->pdev->dev, tx_frame); +remove_mac_filters: + if (ice_remove_mac(&pf->hw, &tmp_list)) + netdev_err(netdev, "Could not remove MAC filter for the test VSI"); +free_mac_list: + ice_free_fltr_list(&pf->pdev->dev, &tmp_list); +lbtest_mac_dis: + /* Disable MAC loopback after the test is completed. */ + if (ice_aq_set_mac_loopback(&pf->hw, false, NULL)) + netdev_err(netdev, "Could not disable MAC loopback\n"); +lbtest_rings_dis: + if (ice_lbtest_disable_rings(test_vsi)) + netdev_err(netdev, "Could not disable test rings\n"); +lbtest_vsi_close: + test_vsi->netdev = NULL; + if (ice_vsi_release(test_vsi)) + netdev_err(netdev, "Failed to remove the test VSI"); + + return ret; +} + +/** + * ice_intr_test - perform an interrupt test on a given net_device + * @netdev: network interface device structure + * + * This function performs one of the self-tests required by ethtool. + * Returns 0 on success, non-zero on failure. + */ +static u64 ice_intr_test(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + u16 swic_old = pf->sw_int_count; + + netdev_info(netdev, "interrupt test\n"); + + wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_idx), + GLINT_DYN_CTL_SW_ITR_INDX_M | + GLINT_DYN_CTL_INTENA_MSK_M | + GLINT_DYN_CTL_SWINT_TRIG_M); + + usleep_range(1000, 2000); + return (swic_old == pf->sw_int_count); +} + +/** + * ice_self_test - handler function for performing a self-test by ethtool + * @netdev: network interface device structure + * @eth_test: ethtool_test structure + * @data: required by ethtool.self_test + * + * This function is called after invoking 'ethtool -t devname' command where + * devname is the name of the network device on which ethtool should operate. + * It performs a set of self-tests to check if a device works properly. 
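+ * Hypothetical invocation: 'ethtool -t eth0 offline' runs all five + * tests (register, EEPROM, interrupt, loopback, link), while + * 'ethtool -t eth0 online' runs only the link test and reports the + * offline-only tests as passing (0).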
+ */ +static void +ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test, + u64 *data) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + bool if_running = netif_running(netdev); + struct ice_pf *pf = np->vsi->back; + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + netdev_info(netdev, "offline testing starting\n"); + + set_bit(__ICE_TESTING, pf->state); + + if (ice_active_vfs(pf)) { + dev_warn(&pf->pdev->dev, + "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n"); + data[ICE_ETH_TEST_REG] = 1; + data[ICE_ETH_TEST_EEPROM] = 1; + data[ICE_ETH_TEST_INTR] = 1; + data[ICE_ETH_TEST_LOOP] = 1; + data[ICE_ETH_TEST_LINK] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__ICE_TESTING, pf->state); + goto skip_ol_tests; + } + /* If the device is online then take it offline */ + if (if_running) + /* indicate we're in test mode */ + ice_stop(netdev); + + data[ICE_ETH_TEST_LINK] = ice_link_test(netdev); + data[ICE_ETH_TEST_EEPROM] = ice_eeprom_test(netdev); + data[ICE_ETH_TEST_INTR] = ice_intr_test(netdev); + data[ICE_ETH_TEST_LOOP] = ice_loopback_test(netdev); + data[ICE_ETH_TEST_REG] = ice_reg_test(netdev); + + if (data[ICE_ETH_TEST_LINK] || + data[ICE_ETH_TEST_EEPROM] || + data[ICE_ETH_TEST_LOOP] || + data[ICE_ETH_TEST_INTR] || + data[ICE_ETH_TEST_REG]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + clear_bit(__ICE_TESTING, pf->state); + + if (if_running) { + int status = ice_open(netdev); + + if (status) { + dev_err(&pf->pdev->dev, + "Could not open device %s, err %d", + pf->int_name, status); + } + } + } else { + /* Online tests */ + netdev_info(netdev, "online testing starting\n"); + + data[ICE_ETH_TEST_LINK] = ice_link_test(netdev); + if (data[ICE_ETH_TEST_LINK]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Offline only tests, not run in online; pass by default */ + data[ICE_ETH_TEST_REG] = 0; + data[ICE_ETH_TEST_EEPROM] = 0; + data[ICE_ETH_TEST_INTR] = 0; + data[ICE_ETH_TEST_LOOP] = 0; + } + +skip_ol_tests: + netdev_info(netdev, "testing finished\n"); +} + static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct ice_netdev_priv *np = netdev_priv(netdev); @@ -295,17 +881,17 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) ice_for_each_alloc_txq(vsi, i) { snprintf(p, ETH_GSTRING_LEN, - "tx-queue-%u.tx_packets", i); + "tx_queue_%u_packets", i); p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "tx-queue-%u.tx_bytes", i); + snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; } ice_for_each_alloc_rxq(vsi, i) { snprintf(p, ETH_GSTRING_LEN, - "rx-queue-%u.rx_packets", i); + "rx_queue_%u_packets", i); p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx-queue-%u.rx_bytes", i); + snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; } @@ -320,21 +906,24 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { snprintf(p, ETH_GSTRING_LEN, - "port.tx-priority-%u-xon", i); + "tx_priority_%u_xon.nic", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, - "port.tx-priority-%u-xoff", i); + "tx_priority_%u_xoff.nic", i); p += ETH_GSTRING_LEN; } for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { snprintf(p, ETH_GSTRING_LEN, - "port.rx-priority-%u-xon", i); + "rx_priority_%u_xon.nic", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, - "port.rx-priority-%u-xoff", i); + "rx_priority_%u_xoff.nic", i); p += ETH_GSTRING_LEN; } break; + case 
ETH_SS_TEST: + memcpy(data, ice_gstrings_test, ICE_TEST_LEN * ETH_GSTRING_LEN); + break; case ETH_SS_PRIV_FLAGS: for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) { snprintf(p, ETH_GSTRING_LEN, "%s", @@ -371,6 +960,185 @@ ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) } /** + * ice_set_fec_cfg - Set link FEC options + * @netdev: network interface device structure + * @req_fec: FEC mode to configure + */ +static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_aqc_set_phy_cfg_data config = { 0 }; + struct ice_aqc_get_phy_caps_data *caps; + struct ice_vsi *vsi = np->vsi; + u8 sw_cfg_caps, sw_cfg_fec; + struct ice_port_info *pi; + enum ice_status status; + int err = 0; + + pi = vsi->port_info; + if (!pi) + return -EOPNOTSUPP; + + /* Changing the FEC parameters is not supported if not the PF VSI */ + if (vsi->type != ICE_VSI_PF) { + netdev_info(netdev, "Changing FEC parameters only supported for PF VSI\n"); + return -EOPNOTSUPP; + } + + /* Get last SW configuration */ + caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); + if (!caps) + return -ENOMEM; + + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, + caps, NULL); + if (status) { + err = -EAGAIN; + goto done; + } + + /* Copy SW configuration returned from PHY caps to PHY config */ + ice_copy_phy_caps_to_cfg(caps, &config); + sw_cfg_caps = caps->caps; + sw_cfg_fec = caps->link_fec_options; + + /* Get topology caps, then copy PHY FEC topology caps to PHY config */ + memset(caps, 0, sizeof(*caps)); + + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, + caps, NULL); + if (status) { + err = -EAGAIN; + goto done; + } + + config.caps |= (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC); + config.link_fec_opt = caps->link_fec_options; + + ice_cfg_phy_fec(&config, req_fec); + + /* If FEC mode has changed, then set PHY configuration and enable AN.
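+ * (AN is link auto-negotiation; the code below requests an automatic + * link update only when the PHY reports ICE_AQC_PHY_AN_MODE.)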
*/ + if ((config.caps & ICE_AQ_PHY_ENA_AUTO_FEC) != + (sw_cfg_caps & ICE_AQC_PHY_EN_AUTO_FEC) || + config.link_fec_opt != sw_cfg_fec) { + if (caps->caps & ICE_AQC_PHY_AN_MODE) + config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + + status = ice_aq_set_phy_cfg(pi->hw, pi->lport, &config, NULL); + + if (status) + err = -EAGAIN; + } + +done: + devm_kfree(&vsi->back->pdev->dev, caps); + return err; +} + +/** + * ice_set_fecparam - Set FEC link options + * @netdev: network interface device structure + * @fecparam: Ethtool structure to retrieve FEC parameters + */ +static int +ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + enum ice_fec_mode fec; + + switch (fecparam->fec) { + case ETHTOOL_FEC_AUTO: + fec = ICE_FEC_AUTO; + break; + case ETHTOOL_FEC_RS: + fec = ICE_FEC_RS; + break; + case ETHTOOL_FEC_BASER: + fec = ICE_FEC_BASER; + break; + case ETHTOOL_FEC_OFF: + case ETHTOOL_FEC_NONE: + fec = ICE_FEC_NONE; + break; + default: + dev_warn(&vsi->back->pdev->dev, "Unsupported FEC mode: %d\n", + fecparam->fec); + return -EINVAL; + } + + return ice_set_fec_cfg(netdev, fec); +} + +/** + * ice_get_fecparam - Get link FEC options + * @netdev: network interface device structure + * @fecparam: Ethtool structure to retrieve FEC parameters + */ +static int +ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_aqc_get_phy_caps_data *caps; + struct ice_link_status *link_info; + struct ice_vsi *vsi = np->vsi; + struct ice_port_info *pi; + enum ice_status status; + int err = 0; + + pi = vsi->port_info; + + if (!pi) + return -EOPNOTSUPP; + link_info = &pi->phy.link_info; + + /* Set FEC mode based on negotiated link info */ + switch (link_info->fec_info) { + case ICE_AQ_LINK_25G_KR_FEC_EN: + fecparam->active_fec = ETHTOOL_FEC_BASER; + break; + case ICE_AQ_LINK_25G_RS_528_FEC_EN: + /* fall through */ + case ICE_AQ_LINK_25G_RS_544_FEC_EN: + fecparam->active_fec = ETHTOOL_FEC_RS; + break; + default: + fecparam->active_fec = ETHTOOL_FEC_OFF; + break; + } + + caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); + if (!caps) + return -ENOMEM; + + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, + caps, NULL); + if (status) { + err = -EAGAIN; + goto done; + } + + /* Set supported/configured FEC modes based on PHY capability */ + if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC) + fecparam->fec |= ETHTOOL_FEC_AUTO; + if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN || + caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ || + caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN || + caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ) + fecparam->fec |= ETHTOOL_FEC_BASER; + if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ || + caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ || + caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN) + fecparam->fec |= ETHTOOL_FEC_RS; + if (caps->link_fec_options == 0) + fecparam->fec |= ETHTOOL_FEC_OFF; + +done: + devm_kfree(&vsi->back->pdev->dev, caps); + return err; +} + +/** * ice_get_priv_flags - report device private flags * @netdev: network interface device structure * @@ -433,10 +1201,11 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS); - if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, change_flags)) { - if 
(test_bit(ICE_FLAG_DISABLE_FW_LLDP, pf->flags)) { + if (test_bit(ICE_FLAG_ENABLE_FW_LLDP, change_flags)) { + if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) { enum ice_status status; + /* Disable FW LLDP engine */ status = ice_aq_cfg_lldp_mib_change(&pf->hw, false, NULL); /* If unregistering for LLDP events fails, this is @@ -450,7 +1219,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) /* The AQ call to stop the FW LLDP agent will generate * an error if the agent is already stopped. */ - status = ice_aq_stop_lldp(&pf->hw, true, NULL); + status = ice_aq_stop_lldp(&pf->hw, true, true, NULL); if (status) dev_warn(&pf->pdev->dev, "Fail to stop LLDP agent\n"); @@ -458,9 +1227,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) * will likely not need DCB, so failure to init is * not a concern of ethtool */ - status = ice_init_pf_dcb(pf); + status = ice_init_pf_dcb(pf, true); if (status) dev_warn(&pf->pdev->dev, "Fail to init DCB\n"); + + /* Forward LLDP packets to default VSI so that they + * are passed up the stack + */ + ice_cfg_sw_lldp(vsi, false, true); } else { enum ice_status status; bool dcbx_agent_status; @@ -468,12 +1242,12 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) /* AQ command to start FW LLDP agent will return an * error if the agent is already started */ - status = ice_aq_start_lldp(&pf->hw, NULL); + status = ice_aq_start_lldp(&pf->hw, true, NULL); if (status) dev_warn(&pf->pdev->dev, "Fail to start LLDP Agent\n"); - /* AQ command to start FW DCBx agent will fail if + /* AQ command to start FW DCBX agent will fail if * the agent is already started */ status = ice_aq_start_stop_dcbx(&pf->hw, true, @@ -491,15 +1265,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) * registration/init failed but do not return error * state to ethtool */ - status = ice_aq_cfg_lldp_mib_change(&pf->hw, false, - NULL); - if (status) - dev_dbg(&pf->pdev->dev, - "Fail to reg for MIB change\n"); - - status = ice_init_pf_dcb(pf); + status = ice_init_pf_dcb(pf, true); if (status) dev_dbg(&pf->pdev->dev, "Fail to init DCB\n"); + + /* Remove rule to direct LLDP packets to default VSI. + * The FW LLDP engine will now be consuming them. + */ + ice_cfg_sw_lldp(vsi, false, false); } } clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags); @@ -529,6 +1302,8 @@ static int ice_get_sset_count(struct net_device *netdev, int sset) * not safe. 
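* (The count returned here must also stay in step with what * ice_get_strings() emits for the same string set, or the data ethtool * reports will be misaligned.)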
*/ return ICE_ALL_STATS_LEN(netdev); + case ETH_SS_TEST: + return ICE_TEST_LEN; case ETH_SS_PRIV_FLAGS: return ICE_PRIV_FLAG_ARRAY_SIZE; default: @@ -628,7 +1403,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) { ethtool_link_ksettings_add_link_mode(ks, supported, 100baseT_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB) ethtool_link_ksettings_add_link_mode(ks, advertising, 100baseT_Full); } @@ -636,14 +1412,16 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) { ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseT_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseT_Full); } if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) { ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseKX_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseKX_Full); } @@ -651,14 +1429,16 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) { ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseX_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseX_Full); } if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) { ethtool_link_ksettings_add_link_mode(ks, supported, 2500baseT_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) ethtool_link_ksettings_add_link_mode(ks, advertising, 2500baseT_Full); } @@ -666,7 +1446,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) { ethtool_link_ksettings_add_link_mode(ks, supported, 2500baseX_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) ethtool_link_ksettings_add_link_mode(ks, advertising, 2500baseX_Full); } @@ -674,7 +1455,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) { ethtool_link_ksettings_add_link_mode(ks, supported, 5000baseT_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 5000baseT_Full); } @@ -684,28 +1466,32 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseT_Full); } if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseKR_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & 
ICE_AQ_LINK_SPEED_10GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseKR_Full); } if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseSR_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseSR_Full); } if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseLR_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseLR_Full); } @@ -717,7 +1503,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) { ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseCR_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); } @@ -725,7 +1512,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_LR) { ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseSR_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseSR_Full); } @@ -734,14 +1522,16 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) { ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseKR_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseKR_Full); } if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) { ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseKR4_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseKR4_Full); } @@ -750,21 +1540,24 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) { ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseCR4_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseCR4_Full); } if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) { ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseSR4_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseSR4_Full); } if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) { ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseLR4_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseLR4_Full); } @@ -779,7 +1572,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_low & 
ICE_PHY_TYPE_LOW_50G_AUI1) { ethtool_link_ksettings_add_link_mode(ks, supported, 50000baseCR2_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 50000baseCR2_Full); } @@ -787,7 +1581,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) { ethtool_link_ksettings_add_link_mode(ks, supported, 50000baseKR2_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 50000baseKR2_Full); } @@ -797,7 +1592,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR) { ethtool_link_ksettings_add_link_mode(ks, supported, 50000baseSR2_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) ethtool_link_ksettings_add_link_mode(ks, advertising, 50000baseSR2_Full); } @@ -814,7 +1610,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2) { ethtool_link_ksettings_add_link_mode(ks, supported, 100000baseCR4_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) need_add_adv_mode = true; } if (need_add_adv_mode) { @@ -826,7 +1623,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) { ethtool_link_ksettings_add_link_mode(ks, supported, 100000baseSR4_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) need_add_adv_mode = true; } if (need_add_adv_mode) { @@ -838,7 +1636,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_DR) { ethtool_link_ksettings_add_link_mode(ks, supported, 100000baseLR4_ER4_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) need_add_adv_mode = true; } if (need_add_adv_mode) { @@ -851,7 +1650,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) { ethtool_link_ksettings_add_link_mode(ks, supported, 100000baseKR4_Full); - if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) + if (!hw_link_info->req_speeds || + hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) need_add_adv_mode = true; } if (need_add_adv_mode) @@ -1275,6 +2075,7 @@ ice_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *ks) { struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_aqc_get_phy_caps_data *caps; struct ice_link_status *hw_link_info; struct ice_vsi *vsi = np->vsi; @@ -1345,6 +2146,40 @@ ice_get_link_ksettings(struct net_device *netdev, break; } + caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); + if (!caps) + goto done; + + if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_TOPO_CAP, + caps, NULL)) + netdev_info(netdev, "Get phy capability failed.\n"); + + /* Set supported FEC modes based on PHY capability */ + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + + if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN || + caps->link_fec_options & 
ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN) + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN) + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + + if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_SW_CFG, + caps, NULL)) + netdev_info(netdev, "Get phy capability failed.\n"); + + /* Set advertised FEC modes based on PHY capability */ + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); + + if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ || + caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ) + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_BASER); + if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ || + caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + +done: + devm_kfree(&vsi->back->pdev->dev, caps); return 0; } @@ -2371,8 +3206,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) { rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high; - wr32(&pf->hw, GLINT_RATE(vsi->hw_base_vector + - rc->ring->q_vector->v_idx), + wr32(&pf->hw, GLINT_RATE(rc->ring->q_vector->reg_idx), ice_intrl_usec_to_reg(ec->rx_coalesce_usecs_high, pf->hw.intrl_gran)); } @@ -2533,6 +3367,7 @@ static const struct ethtool_ops ice_ethtool_ops = { .get_regs = ice_get_regs, .get_msglevel = ice_get_msglevel, .set_msglevel = ice_set_msglevel, + .self_test = ice_self_test, .get_link = ethtool_op_get_link, .get_eeprom_len = ice_get_eeprom_len, .get_eeprom = ice_get_eeprom, @@ -2557,6 +3392,8 @@ static const struct ethtool_ops ice_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, .get_per_queue_coalesce = ice_get_per_q_coalesce, .set_per_queue_coalesce = ice_set_per_q_coalesce, + .get_fecparam = ice_get_fecparam, + .set_fecparam = ice_set_fecparam, }; /** diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index ec25f26069b0..6c5ce05742b1 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -6,6 +6,9 @@ #ifndef _ICE_HW_AUTOGEN_H_ #define _ICE_HW_AUTOGEN_H_ +#define PF0INT_ITR_0(_i) (0x03000004 + ((_i) * 4096)) +#define PF0INT_ITR_1(_i) (0x03000008 + ((_i) * 4096)) +#define PF0INT_ITR_2(_i) (0x0300000C + ((_i) * 4096)) #define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) #define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4)) #define QTX_COMM_HEAD_HEAD_S 0 @@ -155,6 +158,7 @@ #define PFINT_OICR_HMC_ERR_M BIT(26) #define PFINT_OICR_PE_CRITERR_M BIT(28) #define PFINT_OICR_VFLR_M BIT(29) +#define PFINT_OICR_SWINT_M BIT(31) #define PFINT_OICR_CTL 0x0016CA80 #define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) #define PFINT_OICR_CTL_ITR_INDX_S 11 diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index fbf1eba0cc2a..a19f5920733b 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -137,6 +137,8 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) * for PF or EMP this field should be set to zero */ switch (vsi->type) { + case ICE_VSI_LB: + /* fall through */ case ICE_VSI_PF: tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; break; @@ -251,6 +253,10 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) if (!vsi->rx_rings) goto err_rxrings; + /* There is no 
need to allocate q_vectors for a loopback VSI. */ + if (vsi->type == ICE_VSI_LB) + return 0; + /* allocate memory for q_vector pointers */ vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors, sizeof(*vsi->q_vectors), GFP_KERNEL); @@ -275,6 +281,8 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) { switch (vsi->type) { case ICE_VSI_PF: + /* fall through */ + case ICE_VSI_LB: vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; break; @@ -313,10 +321,14 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) vsi->alloc_rxq = vf->num_vf_qs; /* pf->num_vf_msix includes (VF miscellaneous vector + * data queue interrupts). Since vsi->num_q_vectors is number - * of queues vectors, subtract 1 from the original vector - * count + * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the + * original vector count */ - vsi->num_q_vectors = pf->num_vf_msix - 1; + vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF; + break; + case ICE_VSI_LB: + vsi->alloc_txq = 1; + vsi->alloc_rxq = 1; break; default: dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); @@ -516,6 +528,10 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id) if (ice_vsi_alloc_arrays(vsi)) goto err_rings; break; + case ICE_VSI_LB: + if (ice_vsi_alloc_arrays(vsi)) + goto err_rings; + break; default: dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); goto unlock_pf; @@ -732,6 +748,8 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) BIT(cap->rss_table_entry_width)); vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; break; + case ICE_VSI_LB: + break; default: dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); @@ -924,6 +942,9 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; break; + case ICE_VSI_LB: + dev_dbg(&pf->pdev->dev, "Unsupported VSI type %d\n", vsi->type); + return; default: dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); return; @@ -955,6 +976,8 @@ static int ice_vsi_init(struct ice_vsi *vsi) ctxt->info = vsi->info; switch (vsi->type) { + case ICE_VSI_LB: + /* fall through */ case ICE_VSI_PF: ctxt->flags = ICE_AQ_VSI_TYPE_PF; break; @@ -1145,61 +1168,32 @@ err_out: static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - int num_q_vectors = 0; + u16 num_q_vectors; + + /* SRIOV doesn't grab irq_tracker entries for each VSI */ + if (vsi->type == ICE_VSI_VF) + return 0; - if (vsi->sw_base_vector || vsi->hw_base_vector) { - dev_dbg(&pf->pdev->dev, "VSI %d has non-zero HW base vector %d or SW base vector %d\n", - vsi->vsi_num, vsi->hw_base_vector, vsi->sw_base_vector); + if (vsi->base_vector) { + dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", + vsi->vsi_num, vsi->base_vector); return -EEXIST; } if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) return -ENOENT; - switch (vsi->type) { - case ICE_VSI_PF: - num_q_vectors = vsi->num_q_vectors; - /* reserve slots from OS requested IRQs */ - vsi->sw_base_vector = ice_get_res(pf, pf->sw_irq_tracker, - num_q_vectors, vsi->idx); - if (vsi->sw_base_vector < 0) { - dev_err(&pf->pdev->dev, - "Failed to get tracking for %d SW vectors for VSI %d, err=%d\n", - num_q_vectors, vsi->vsi_num, - vsi->sw_base_vector); - return -ENOENT; - } - pf->num_avail_sw_msix -= num_q_vectors; - - /* reserve slots from HW interrupts */ - vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker, - num_q_vectors, vsi->idx); 
- break; - case ICE_VSI_VF: - /* take VF misc vector and data vectors into account */ - num_q_vectors = pf->num_vf_msix; - /* For VF VSI, reserve slots only from HW interrupts */ - vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker, - num_q_vectors, vsi->idx); - break; - default: - dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); - break; - } - - if (vsi->hw_base_vector < 0) { + num_q_vectors = vsi->num_q_vectors; + /* reserve slots from OS requested IRQs */ + vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors, + vsi->idx); + if (vsi->base_vector < 0) { dev_err(&pf->pdev->dev, - "Failed to get tracking for %d HW vectors for VSI %d, err=%d\n", - num_q_vectors, vsi->vsi_num, vsi->hw_base_vector); - if (vsi->type != ICE_VSI_VF) { - ice_free_res(pf->sw_irq_tracker, - vsi->sw_base_vector, vsi->idx); - pf->num_avail_sw_msix += num_q_vectors; - } + "Failed to get tracking for %d vectors for VSI %d, err=%d\n", + num_q_vectors, vsi->vsi_num, vsi->base_vector); return -ENOENT; } - - pf->num_avail_hw_msix -= num_q_vectors; + pf->num_avail_sw_msix -= num_q_vectors; return 0; } @@ -1842,8 +1836,73 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector) } /** + * ice_cfg_txq_interrupt - configure interrupt on Tx queue + * @vsi: the VSI being configured + * @txq: Tx queue being mapped to MSI-X vector + * @msix_idx: MSI-X vector index within the function + * @itr_idx: ITR index of the interrupt cause + * + * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector + * within the function space. + */ +#ifdef CONFIG_PCI_IOV +void +ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx) +#else +static void +ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx) +#endif /* CONFIG_PCI_IOV */ +{ + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + u32 val; + + itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M; + + val = QINT_TQCTL_CAUSE_ENA_M | itr_idx | + ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M); + + wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); +} + +/** + * ice_cfg_rxq_interrupt - configure interrupt on Rx queue + * @vsi: the VSI being configured + * @rxq: Rx queue being mapped to MSI-X vector + * @msix_idx: MSI-X vector index within the function + * @itr_idx: ITR index of the interrupt cause + * + * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector + * within the function space. + */ +#ifdef CONFIG_PCI_IOV +void +ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx) +#else +static void +ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx) +#endif /* CONFIG_PCI_IOV */ +{ + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + u32 val; + + itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M; + + val = QINT_RQCTL_CAUSE_ENA_M | itr_idx | + ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M); + + wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); + + ice_flush(hw); +} + +/** * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW * @vsi: the VSI being configured + * + * This configures MSIX mode interrupts for the PF VSI, and should not be used + * for the VF VSI. */ void ice_vsi_cfg_msix(struct ice_vsi *vsi) { @@ -1873,43 +1932,17 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) * tracked for this PF. 
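* (reg_idx is the function-relative MSI-X slot; it is the value written * into QINT_TQCTL/QINT_RQCTL by the ice_cfg_txq_interrupt() and * ice_cfg_rxq_interrupt() helpers used below.)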
*/ for (q = 0; q < q_vector->num_ring_tx; q++) { - int itr_idx = (q_vector->tx.itr_idx << - QINT_TQCTL_ITR_INDX_S) & - QINT_TQCTL_ITR_INDX_M; - u32 val; - - if (vsi->type == ICE_VSI_VF) - val = QINT_TQCTL_CAUSE_ENA_M | itr_idx | - (((i + 1) << QINT_TQCTL_MSIX_INDX_S) & - QINT_TQCTL_MSIX_INDX_M); - else - val = QINT_TQCTL_CAUSE_ENA_M | itr_idx | - ((reg_idx << QINT_TQCTL_MSIX_INDX_S) & - QINT_TQCTL_MSIX_INDX_M); - wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); + ice_cfg_txq_interrupt(vsi, txq, reg_idx, + q_vector->tx.itr_idx); txq++; } for (q = 0; q < q_vector->num_ring_rx; q++) { - int itr_idx = (q_vector->rx.itr_idx << - QINT_RQCTL_ITR_INDX_S) & - QINT_RQCTL_ITR_INDX_M; - u32 val; - - if (vsi->type == ICE_VSI_VF) - val = QINT_RQCTL_CAUSE_ENA_M | itr_idx | - (((i + 1) << QINT_RQCTL_MSIX_INDX_S) & - QINT_RQCTL_MSIX_INDX_M); - else - val = QINT_RQCTL_CAUSE_ENA_M | itr_idx | - ((reg_idx << QINT_RQCTL_MSIX_INDX_S) & - QINT_RQCTL_MSIX_INDX_M); - wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); + ice_cfg_rxq_interrupt(vsi, rxq, reg_idx, + q_vector->rx.itr_idx); rxq++; } } - - ice_flush(hw); } /** @@ -2024,6 +2057,19 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) } /** + * ice_trigger_sw_intr - trigger a software interrupt + * @hw: pointer to the HW structure + * @q_vector: interrupt vector to trigger the software interrupt for + */ +void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector) +{ + wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), + (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) | + GLINT_DYN_CTL_SWINT_TRIG_M | + GLINT_DYN_CTL_INTENA_M); +} + +/** * ice_vsi_stop_tx_rings - Disable Tx rings * @vsi: the VSI being configured * @rst_src: reset source @@ -2070,8 +2116,9 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, break; for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { - if (!rings || !rings[q_idx] || - !rings[q_idx]->q_vector) { + struct ice_q_vector *q_vector; + + if (!rings || !rings[q_idx]) { err = -EINVAL; goto err_out; } @@ -2091,9 +2138,10 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, /* trigger a software interrupt for the vector * associated to the queue to schedule NAPI handler */ - wr32(hw, GLINT_DYN_CTL(rings[i]->q_vector->reg_idx), - GLINT_DYN_CTL_SWINT_TRIG_M | - GLINT_DYN_CTL_INTENA_MSK_M); + q_vector = rings[i]->q_vector; + if (q_vector) + ice_trigger_sw_intr(hw, q_vector); + q_idx++; } status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc, @@ -2234,7 +2282,14 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi) goto clear_reg_idx; } - q_vector->reg_idx = q_vector->v_idx + vsi->hw_base_vector; + if (vsi->type == ICE_VSI_VF) { + struct ice_vf *vf = &vsi->back->vf[vsi->vf_id]; + + q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector); + } else { + q_vector->reg_idx = + q_vector->v_idx + vsi->base_vector; + } } return 0; @@ -2291,6 +2346,54 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule) } /** + * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling + * @vsi: the VSI being configured + * @tx: bool to determine Tx or Rx rule + * @create: bool to determine create or remove Rule + */ +void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) +{ + struct ice_fltr_list_entry *list; + struct ice_pf *pf = vsi->back; + LIST_HEAD(tmp_add_list); + enum ice_status status; + + list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); + if (!list) + return; + + list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; + list->fltr_info.vsi_handle = vsi->idx; + 
list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP; + + if (tx) { + list->fltr_info.fltr_act = ICE_DROP_PACKET; + list->fltr_info.flag = ICE_FLTR_TX; + list->fltr_info.src_id = ICE_SRC_ID_VSI; + } else { + list->fltr_info.fltr_act = ICE_FWD_TO_VSI; + list->fltr_info.flag = ICE_FLTR_RX; + list->fltr_info.src_id = ICE_SRC_ID_LPORT; + } + + INIT_LIST_HEAD(&list->list_entry); + list_add(&list->list_entry, &tmp_add_list); + + if (create) + status = ice_add_eth_mac(&pf->hw, &tmp_add_list); + else + status = ice_remove_eth_mac(&pf->hw, &tmp_add_list); + + if (status) + dev_err(&pf->pdev->dev, + "Fail %s %s LLDP rule on VSI %i error: %d\n", + create ? "adding" : "removing", tx ? "TX" : "RX", + vsi->vsi_num, status); + + ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); +} + +/** * ice_vsi_setup - Set up a VSI by a given type * @pf: board private structure * @pi: pointer to the port_info instance @@ -2310,6 +2413,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct device *dev = &pf->pdev->dev; + enum ice_status status; struct ice_vsi *vsi; int ret, i; @@ -2389,23 +2493,24 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, if (ret) goto unroll_alloc_q_vector; - /* Setup Vector base only during VF init phase or when VF asks - * for more vectors than assigned number. In all other cases, - * assign hw_base_vector to the value given earlier. - */ - if (test_bit(ICE_VF_STATE_CFG_INTR, pf->vf[vf_id].vf_states)) { - ret = ice_vsi_setup_vector_base(vsi); - if (ret) - goto unroll_vector_base; - } else { - vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx; - } ret = ice_vsi_set_q_vectors_reg_idx(vsi); if (ret) goto unroll_vector_base; pf->q_left_tx -= vsi->alloc_txq; pf->q_left_rx -= vsi->alloc_rxq; + + /* Do not exit if configuring RSS had an issue, at least + * receive traffic on first queue. Hence no need to capture + * return value + */ + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + ice_vsi_cfg_rss_lut_key(vsi); + break; + case ICE_VSI_LB: + ret = ice_vsi_alloc_rings(vsi); + if (ret) + goto unroll_vsi_init; break; default: /* clean up the resources and exit */ @@ -2416,12 +2521,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, for (i = 0; i < vsi->tc_cfg.numtc; i++) max_txqs[i] = pf->num_lan_tx; - ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, - max_txqs); - if (ret) { + status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_txqs); + if (status) { dev_err(&pf->pdev->dev, "VSI %d failed lan queue config, error %d\n", - vsi->vsi_num, ret); + vsi->vsi_num, status); goto unroll_vector_base; } @@ -2430,19 +2535,28 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, * out PAUSE or PFC frames. If enabled, FW can still send FC frames. * The rule is added once for PF VSI in order to create appropriate * recipe, since VSI/VSI list is ignored with drop action... + * Also add rules to handle LLDP Tx and Rx packets. Tx LLDP packets + * need to be dropped so that VFs cannot send LLDP packets to reconfig + * DCB settings in the HW. Also, if the FW DCBX engine is not running + * then Rx LLDP packets need to be redirected up the stack. 
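+ * Call pattern used below: ice_cfg_sw_lldp(vsi, true, true) installs + * the Tx drop rule, and ice_cfg_sw_lldp(vsi, false, true) installs the + * Rx redirect rule while the FW LLDP engine is disabled.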
*/ - if (vsi->type == ICE_VSI_PF) + if (vsi->type == ICE_VSI_PF) { ice_vsi_add_rem_eth_mac(vsi, true); + /* Tx LLDP packets */ + ice_cfg_sw_lldp(vsi, true, true); + + /* Rx LLDP packets */ + if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) + ice_cfg_sw_lldp(vsi, false, true); + } + return vsi; unroll_vector_base: /* reclaim SW interrupts back to the common pool */ - ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); + ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); pf->num_avail_sw_msix += vsi->num_q_vectors; - /* reclaim HW interrupt back to the common pool */ - ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, vsi->idx); - pf->num_avail_hw_msix += vsi->num_q_vectors; unroll_alloc_q_vector: ice_vsi_free_q_vectors(vsi); unroll_vsi_init: @@ -2463,17 +2577,17 @@ unroll_get_qs: static void ice_vsi_release_msix(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - u16 vector = vsi->hw_base_vector; struct ice_hw *hw = &pf->hw; u32 txq = 0; u32 rxq = 0; int i, q; - for (i = 0; i < vsi->num_q_vectors; i++, vector++) { + for (i = 0; i < vsi->num_q_vectors; i++) { struct ice_q_vector *q_vector = vsi->q_vectors[i]; + u16 reg_idx = q_vector->reg_idx; - wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0); - wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0); + wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0); + wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0); for (q = 0; q < q_vector->num_ring_tx; q++) { wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); txq++; @@ -2495,7 +2609,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi) void ice_vsi_free_irq(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - int base = vsi->sw_base_vector; + int base = vsi->base_vector; if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { int i; @@ -2591,11 +2705,11 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) int count = 0; int i; - if (!res || index >= res->num_entries) + if (!res || index >= res->end) return -EINVAL; id |= ICE_RES_VALID_BIT; - for (i = index; i < res->num_entries && res->list[i] == id; i++) { + for (i = index; i < res->end && res->list[i] == id; i++) { res->list[i] = 0; count++; } @@ -2613,10 +2727,9 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) */ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) { - int start = res->search_hint; - int end = start; + int start = 0, end = 0; - if ((start + needed) > res->num_entries) + if (needed > res->end) return -ENOMEM; id |= ICE_RES_VALID_BIT; @@ -2625,7 +2738,7 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) /* skip already allocated entries */ if (res->list[end++] & ICE_RES_VALID_BIT) { start = end; - if ((start + needed) > res->num_entries) + if ((start + needed) > res->end) break; } @@ -2636,13 +2749,9 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) while (i != end) res->list[i++] = id; - if (end == res->num_entries) - end = 0; - - res->search_hint = end; return start; } - } while (1); + } while (end < res->end); return -ENOMEM; } @@ -2654,16 +2763,11 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) * @needed: size of the block needed * @id: identifier to track owner * - * Returns the base item index of the block, or -ENOMEM for error - * The search_hint trick and lack of advanced fit-finding only works - * because we're highly likely to have all the same sized requests. - * Linear search time and any fragmentation should be minimal. 
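With the search hint gone, ice_search_res() is a plain first-fit scan from index 0 up to res->end, and ice_get_res() below collapses to a single call. The behavior is easy to model in isolation; the following self-contained userspace C mirrors the loop above (tracker trimmed to the fields this patch touches, ICE_RES_VALID_BIT assumed to be the driver's 0x8000 owner tag, and -1 standing in for -ENOMEM):

    #include <stdint.h>
    #include <stdio.h>

    #define ICE_RES_VALID_BIT 0x8000 /* assumed value of the driver define */
    #define DEMO_ENTRIES 8

    struct res_tracker {
            uint16_t num_entries;
            uint16_t end;                /* allocation never reaches this index */
            uint16_t list[DEMO_ENTRIES]; /* owner id | ICE_RES_VALID_BIT when taken */
    };

    /* mirrors the simplified ice_search_res() above */
    static int search_res(struct res_tracker *res, uint16_t needed, uint16_t id)
    {
            int start = 0, end = 0;

            if (needed > res->end)
                    return -1;

            id |= ICE_RES_VALID_BIT;

            do {
                    /* skip already allocated entries */
                    if (res->list[end++] & ICE_RES_VALID_BIT) {
                            start = end;
                            if ((start + needed) > res->end)
                                    break;
                    }

                    if (end == (start + needed)) {
                            int i = start;

                            /* enough room: hand the block to the requestor */
                            while (i != end)
                                    res->list[i++] = id;

                            return start;
                    }
            } while (end < res->end);

            return -1;
    }

    int main(void)
    {
            struct res_tracker res = { DEMO_ENTRIES, DEMO_ENTRIES, { 0 } };

            res.list[2] = 7 | ICE_RES_VALID_BIT;    /* pre-claim slot 2 */
            printf("%d\n", search_res(&res, 3, 1)); /* 3: hole 0-1 too small */
            printf("%d\n", search_res(&res, 2, 2)); /* 0: leading hole reused */
            printf("%d\n", search_res(&res, 1, 3)); /* 6: first free slot left */
            return 0;
    }
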
+ * Returns the base item index of the block, or negative for error */ int ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) { - int ret; - if (!res || !pf) return -EINVAL; @@ -2674,16 +2778,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) return -EINVAL; } - /* search based on search_hint */ - ret = ice_search_res(res, needed, id); - - if (ret < 0) { - /* previous search failed. Reset search hint and try again */ - res->search_hint = 0; - ret = ice_search_res(res, needed, id); - } - - return ret; + return ice_search_res(res, needed, id); } /** @@ -2692,7 +2787,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) */ void ice_vsi_dis_irq(struct ice_vsi *vsi) { - int base = vsi->sw_base_vector; + int base = vsi->base_vector; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; u32 val; @@ -2738,6 +2833,21 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) } /** + * ice_napi_del - Remove NAPI handler for the VSI + * @vsi: VSI for which NAPI handler is to be removed + */ +void ice_napi_del(struct ice_vsi *vsi) +{ + int v_idx; + + if (!vsi->netdev) + return; + + ice_for_each_q_vector(vsi, v_idx) + netif_napi_del(&vsi->q_vectors[v_idx]->napi); +} + +/** * ice_vsi_release - Delete a VSI and free its resources * @vsi: the VSI being removed * @@ -2745,60 +2855,61 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) */ int ice_vsi_release(struct ice_vsi *vsi) { - struct ice_vf *vf = NULL; struct ice_pf *pf; if (!vsi->back) return -ENODEV; pf = vsi->back; - if (vsi->type == ICE_VSI_VF) - vf = &pf->vf[vsi->vf_id]; - /* do not unregister and free netdevs while driver is in the reset - * recovery pending state. Since reset/rebuild happens through PF - * service task workqueue, its not a good idea to unregister netdev - * that is associated to the PF that is running the work queue items - * currently. This is done to avoid check_flush_dependency() warning - * on this wq + /* do not unregister while driver is in the reset recovery pending + * state. Since reset/rebuild happens through PF service task workqueue, + * it's not a good idea to unregister netdev that is associated to the + * PF that is running the work queue items currently. This is done to + * avoid check_flush_dependency() warning on this wq */ - if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) { - ice_napi_del(vsi); + if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) unregister_netdev(vsi->netdev); - free_netdev(vsi->netdev); - vsi->netdev = NULL; - } if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) ice_rss_clean(vsi); /* Disable VSI and free resources */ - ice_vsi_dis_irq(vsi); + if (vsi->type != ICE_VSI_LB) + ice_vsi_dis_irq(vsi); ice_vsi_close(vsi); - /* reclaim interrupt vectors back to PF */ + /* SR-IOV determines needed MSIX resources all at once instead of per + * VSI since when VFs are spawned we know how many VFs there are and how + * many interrupts each VF needs. SR-IOV MSIX resources are also + * cleared in the same manner. 
+ */ if (vsi->type != ICE_VSI_VF) { /* reclaim SW interrupts back to the common pool */ - ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); + ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); pf->num_avail_sw_msix += vsi->num_q_vectors; - /* reclaim HW interrupts back to the common pool */ - ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, vsi->idx); - pf->num_avail_hw_msix += vsi->num_q_vectors; - } else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) { - /* Reclaim VF resources back only while freeing all VFs or - * vector reassignment is requested - */ - ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx, - vsi->idx); - pf->num_avail_hw_msix += pf->num_vf_msix; } - if (vsi->type == ICE_VSI_PF) + if (vsi->type == ICE_VSI_PF) { ice_vsi_add_rem_eth_mac(vsi, false); + ice_cfg_sw_lldp(vsi, true, false); + /* The Rx rule will only exist to remove if the LLDP FW + * engine is currently stopped + */ + if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) + ice_cfg_sw_lldp(vsi, false, false); + } ice_remove_vsi_fltr(&pf->hw, vsi->idx); ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); ice_vsi_delete(vsi); ice_vsi_free_q_vectors(vsi); + + /* make sure unregister_netdev() was called by checking __ICE_DOWN */ + if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) { + free_netdev(vsi->netdev); + vsi->netdev = NULL; + } + ice_vsi_clear_rings(vsi); ice_vsi_put_qs(vsi); @@ -2825,6 +2936,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct ice_vf *vf = NULL; + enum ice_status status; struct ice_pf *pf; int ret, i; @@ -2838,24 +2950,17 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); ice_vsi_free_q_vectors(vsi); + /* SR-IOV determines needed MSIX resources all at once instead of per + * VSI since when VFs are spawned we know how many VFs there are and how + * many interrupts each VF needs. SR-IOV MSIX resources are also + * cleared in the same manner. 
+ */ if (vsi->type != ICE_VSI_VF) { /* reclaim SW interrupts back to the common pool */ - ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); + ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); pf->num_avail_sw_msix += vsi->num_q_vectors; - vsi->sw_base_vector = 0; - /* reclaim HW interrupts back to the common pool */ - ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, - vsi->idx); - pf->num_avail_hw_msix += vsi->num_q_vectors; - } else { - /* Reclaim VF resources back to the common pool for reset and - * and rebuild, with vector reassignment - */ - ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx, - vsi->idx); - pf->num_avail_hw_msix += pf->num_vf_msix; + vsi->base_vector = 0; } - vsi->hw_base_vector = 0; ice_vsi_clear_rings(vsi); ice_vsi_free_arrays(vsi); @@ -2881,10 +2986,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret) goto err_rings; - ret = ice_vsi_setup_vector_base(vsi); - if (ret) - goto err_vectors; - ret = ice_vsi_set_q_vectors_reg_idx(vsi); if (ret) goto err_vectors; @@ -2929,12 +3030,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) for (i = 0; i < vsi->tc_cfg.numtc; i++) max_txqs[i] = pf->num_lan_tx; - ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, - max_txqs); - if (ret) { + status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_txqs); + if (status) { dev_err(&pf->pdev->dev, "VSI %d failed lan queue config, error %d\n", - vsi->vsi_num, ret); + vsi->vsi_num, status); goto err_vectors; } return 0; @@ -2956,7 +3057,7 @@ err_vsi: /** * ice_is_reset_in_progress - check for a reset in progress - * @state: pf state field + * @state: PF state field */ bool ice_is_reset_in_progress(unsigned long *state) { diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index a91d3553cc89..6e43ef03bfc3 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -19,6 +19,14 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi); void ice_vsi_cfg_msix(struct ice_vsi *vsi); +#ifdef CONFIG_PCI_IOV +void +ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx); + +void +ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx); +#endif /* CONFIG_PCI_IOV */ + int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid); int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid); @@ -37,6 +45,8 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc); +void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); + void ice_vsi_delete(struct ice_vsi *vsi); int ice_vsi_clear(struct ice_vsi *vsi); @@ -49,6 +59,8 @@ struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, enum ice_vsi_type type, u16 vf_id); +void ice_napi_del(struct ice_vsi *vsi); + int ice_vsi_release(struct ice_vsi *vsi); void ice_vsi_close(struct ice_vsi *vsi); @@ -64,6 +76,8 @@ bool ice_is_reset_in_progress(unsigned long *state); void ice_vsi_free_q_vectors(struct ice_vsi *vsi); +void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector); + void ice_vsi_put_qs(struct ice_vsi *vsi); #ifdef CONFIG_DCB diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 7843abf4d44d..28ec0d57941d 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -61,9 +61,10 @@ static u32 ice_get_tx_pending(struct ice_ring *ring) static void 
ice_check_for_hang_subtask(struct ice_pf *pf) { struct ice_vsi *vsi = NULL; + struct ice_hw *hw; unsigned int i; - u32 v, v_idx; int packets; + u32 v; ice_for_each_vsi(pf, v) if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { @@ -77,12 +78,12 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf) if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) return; + hw = &vsi->back->hw; + for (i = 0; i < vsi->num_txq; i++) { struct ice_ring *tx_ring = vsi->tx_rings[i]; if (tx_ring && tx_ring->desc) { - int itr = ICE_ITR_NONE; - /* If packet counter has not changed the queue is * likely stalled, so force an interrupt for this * queue. @@ -93,12 +94,7 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf) packets = tx_ring->stats.pkts & INT_MAX; if (tx_ring->tx_stats.prev_pkt == packets) { /* Trigger sw interrupt to revive the queue */ - v_idx = tx_ring->q_vector->v_idx; - wr32(&vsi->back->hw, - GLINT_DYN_CTL(vsi->hw_base_vector + v_idx), - (itr << GLINT_DYN_CTL_ITR_INDX_S) | - GLINT_DYN_CTL_SWINT_TRIG_M | - GLINT_DYN_CTL_INTENA_MSK_M); + ice_trigger_sw_intr(hw, tx_ring->q_vector); continue; } @@ -113,6 +109,67 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf) } /** + * ice_init_mac_fltr - Set initial MAC filters + * @pf: board private structure + * + * Set initial set of MAC filters for PF VSI; configure filters for permanent + * address and broadcast address. If an error is encountered, netdevice will be + * unregistered. + */ +static int ice_init_mac_fltr(struct ice_pf *pf) +{ + LIST_HEAD(tmp_add_list); + u8 broadcast[ETH_ALEN]; + struct ice_vsi *vsi; + int status; + + vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF); + if (!vsi) + return -EINVAL; + + /* To add a MAC filter, first add the MAC to a list and then + * pass the list to ice_add_mac. + */ + + /* Add a unicast MAC filter so the VSI can get its packets */ + status = ice_add_mac_to_list(vsi, &tmp_add_list, + vsi->port_info->mac.perm_addr); + if (status) + goto unregister; + + /* VSI needs to receive broadcast traffic, so add the broadcast + * MAC address to the list as well. + */ + eth_broadcast_addr(broadcast); + status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast); + if (status) + goto free_mac_list; + + /* Program MAC filters for entries in tmp_add_list */ + status = ice_add_mac(&pf->hw, &tmp_add_list); + if (status) + status = -ENOMEM; + +free_mac_list: + ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + +unregister: + /* We aren't useful with no MAC filters, so unregister if we + * had an error + */ + if (status && vsi->netdev->reg_state == NETREG_REGISTERED) { + dev_err(&pf->pdev->dev, + "Could not add MAC filters error %d. 
Unregistering device\n", + status); + unregister_netdev(vsi->netdev); + free_netdev(vsi->netdev); + vsi->netdev = NULL; + } + + return status; +} + +/** * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced * @netdev: the net device on which the sync is happening * @addr: MAC address to sync @@ -567,7 +624,11 @@ static void ice_reset_subtask(struct ice_pf *pf) */ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) { + struct ice_aqc_get_phy_caps_data *caps; + enum ice_status status; + const char *fec_req; const char *speed; + const char *fec; const char *fc; if (!vsi) @@ -584,6 +645,12 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) } switch (vsi->port_info->phy.link_info.link_speed) { + case ICE_AQ_LINK_SPEED_100GB: + speed = "100 G"; + break; + case ICE_AQ_LINK_SPEED_50GB: + speed = "50 G"; + break; case ICE_AQ_LINK_SPEED_40GB: speed = "40 G"; break; @@ -615,13 +682,13 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) switch (vsi->port_info->fc.current_mode) { case ICE_FC_FULL: - fc = "RX/TX"; + fc = "Rx/Tx"; break; case ICE_FC_TX_PAUSE: - fc = "TX"; + fc = "Tx"; break; case ICE_FC_RX_PAUSE: - fc = "RX"; + fc = "Rx"; break; case ICE_FC_NONE: fc = "None"; @@ -631,8 +698,47 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) break; } - netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n", - speed, fc); + /* Get FEC mode based on negotiated link info */ + switch (vsi->port_info->phy.link_info.fec_info) { + case ICE_AQ_LINK_25G_RS_528_FEC_EN: + /* fall through */ + case ICE_AQ_LINK_25G_RS_544_FEC_EN: + fec = "RS-FEC"; + break; + case ICE_AQ_LINK_25G_KR_FEC_EN: + fec = "FC-FEC/BASE-R"; + break; + default: + fec = "NONE"; + break; + } + + /* Get FEC mode requested based on PHY caps last SW configuration */ + caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); + if (!caps) { + fec_req = "Unknown"; + goto done; + } + + status = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_SW_CFG, caps, NULL); + if (status) + netdev_info(vsi->netdev, "Get phy capability failed.\n"); + + if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ || + caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) + fec_req = "RS-FEC"; + else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ || + caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ) + fec_req = "FC-FEC/BASE-R"; + else + fec_req = "NONE"; + + devm_kfree(&vsi->back->pdev->dev, caps); + +done: + netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Flow Control: %s\n", + speed, fec_req, fec, fc); } /** @@ -664,7 +770,7 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) /** * ice_link_event - process the link event - * @pf: pf that the link event is associated with + * @pf: PF that the link event is associated with * @pi: port_info for the port that the link event is associated with * @link_up: true if the physical link is up and false if it is down * @link_speed: current link speed received from the link event @@ -774,7 +880,7 @@ static int ice_init_link_events(struct ice_port_info *pi) /** * ice_handle_link_event - handle link event via ARQ - * @pf: pf that the link event is associated with + * @pf: PF that the link event is associated with * @event: event structure containing link status info */ static int @@ -1161,16 +1267,16 @@ static void ice_handle_mdd_event(struct ice_pf *pf) } } - /* see if one of the VFs needs to be reset */ - for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { + /* check to 
see if one of the VFs caused the MDD */ + for (i = 0; i < pf->num_alloc_vfs; i++) { struct ice_vf *vf = &pf->vf[i]; - mdd_detected = false; + bool vf_mdd_detected = false; reg = rd32(hw, VP_MDET_TX_PQM(i)); if (reg & VP_MDET_TX_PQM_VALID_M) { wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); - mdd_detected = true; + vf_mdd_detected = true; dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", i); } @@ -1178,7 +1284,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) reg = rd32(hw, VP_MDET_TX_TCLAN(i)); if (reg & VP_MDET_TX_TCLAN_VALID_M) { wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); - mdd_detected = true; + vf_mdd_detected = true; dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", i); } @@ -1186,7 +1292,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) reg = rd32(hw, VP_MDET_TX_TDPU(i)); if (reg & VP_MDET_TX_TDPU_VALID_M) { wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); - mdd_detected = true; + vf_mdd_detected = true; dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", i); } @@ -1194,19 +1300,18 @@ static void ice_handle_mdd_event(struct ice_pf *pf) reg = rd32(hw, VP_MDET_RX(i)); if (reg & VP_MDET_RX_VALID_M) { wr32(hw, VP_MDET_RX(i), 0xFFFF); - mdd_detected = true; + vf_mdd_detected = true; dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", i); } - if (mdd_detected) { + if (vf_mdd_detected) { vf->num_mdd_events++; - dev_info(&pf->pdev->dev, - "Use PF Control I/F to re-enable the VF\n"); - set_bit(ICE_VF_STATE_DIS, vf->vf_states); + if (vf->num_mdd_events > 1) + dev_info(&pf->pdev->dev, "VF %d has had %llu MDD events since last boot\n", + i, vf->num_mdd_events); } } - } /** @@ -1327,7 +1432,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) { int q_vectors = vsi->num_q_vectors; struct ice_pf *pf = vsi->back; - int base = vsi->sw_base_vector; + int base = vsi->base_vector; int rx_int_idx = 0; int tx_int_idx = 0; int vector, err; @@ -1408,7 +1513,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf) wr32(hw, PFINT_OICR_ENA, val); /* SW_ITR_IDX = 0, but don't change INTENA */ - wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx), + wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); } @@ -1430,6 +1535,11 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) oicr = rd32(hw, PFINT_OICR); ena_mask = rd32(hw, PFINT_OICR_ENA); + if (oicr & PFINT_OICR_SWINT_M) { + ena_mask &= ~PFINT_OICR_SWINT_M; + pf->sw_int_count++; + } + if (oicr & PFINT_OICR_MAL_DETECT_M) { ena_mask &= ~PFINT_OICR_MAL_DETECT_M; set_bit(__ICE_MDD_EVENT_PENDING, pf->state); @@ -1556,15 +1666,13 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) ice_flush(hw); if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { - synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector); + synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); devm_free_irq(&pf->pdev->dev, - pf->msix_entries[pf->sw_oicr_idx].vector, pf); + pf->msix_entries[pf->oicr_idx].vector, pf); } pf->num_avail_sw_msix += 1; - ice_free_res(pf->sw_irq_tracker, pf->sw_oicr_idx, ICE_RES_MISC_VEC_ID); - pf->num_avail_hw_msix += 1; - ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID); + ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); } /** @@ -1618,43 +1726,31 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) if (ice_is_reset_in_progress(pf->state)) goto skip_req_irq; - /* reserve one vector in sw_irq_tracker for misc interrupts */ - oicr_idx = ice_get_res(pf, pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); + /* 
reserve one vector in irq_tracker for misc interrupts */ + oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); if (oicr_idx < 0) return oicr_idx; pf->num_avail_sw_msix -= 1; - pf->sw_oicr_idx = oicr_idx; - - /* reserve one vector in hw_irq_tracker for misc interrupts */ - oicr_idx = ice_get_res(pf, pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); - if (oicr_idx < 0) { - ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); - pf->num_avail_sw_msix += 1; - return oicr_idx; - } - pf->num_avail_hw_msix -= 1; - pf->hw_oicr_idx = oicr_idx; + pf->oicr_idx = oicr_idx; err = devm_request_irq(&pf->pdev->dev, - pf->msix_entries[pf->sw_oicr_idx].vector, + pf->msix_entries[pf->oicr_idx].vector, ice_misc_intr, 0, pf->int_name, pf); if (err) { dev_err(&pf->pdev->dev, "devm_request_irq for %s failed: %d\n", pf->int_name, err); - ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); + ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); pf->num_avail_sw_msix += 1; - ice_free_res(pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID); - pf->num_avail_hw_msix += 1; return err; } skip_req_irq: ice_ena_misc_vector(pf); - ice_ena_ctrlq_interrupts(hw, pf->hw_oicr_idx); - wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx), + ice_ena_ctrlq_interrupts(hw, pf->oicr_idx); + wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); ice_flush(hw); @@ -1664,21 +1760,6 @@ skip_req_irq: } /** - * ice_napi_del - Remove NAPI handler for the VSI - * @vsi: VSI for which NAPI handler is to be removed - */ -void ice_napi_del(struct ice_vsi *vsi) -{ - int v_idx; - - if (!vsi->netdev) - return; - - ice_for_each_q_vector(vsi, v_idx) - netif_napi_del(&vsi->q_vectors[v_idx]->napi); -} - -/** * ice_napi_add - register NAPI handler for the VSI * @vsi: VSI for which NAPI handler is to be registered * @@ -1803,8 +1884,8 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) * @pf: board private structure * @pi: pointer to the port_info instance * - * Returns pointer to the successfully allocated VSI sw struct on success, - * otherwise returns NULL on failure. + * Returns pointer to the successfully allocated VSI software struct + * on success, otherwise returns NULL on failure. */ static struct ice_vsi * ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) @@ -1813,6 +1894,20 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) } /** + * ice_lb_vsi_setup - Set up a loopback VSI + * @pf: board private structure + * @pi: pointer to the port_info instance + * + * Returns pointer to the successfully allocated VSI software struct + * on success, otherwise returns NULL on failure. + */ +struct ice_vsi * +ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) +{ + return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID); +} + +/** * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload * @netdev: network interface to be adjusted * @proto: unused protocol @@ -1900,8 +1995,6 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, */ static int ice_setup_pf_sw(struct ice_pf *pf) { - LIST_HEAD(tmp_add_list); - u8 broadcast[ETH_ALEN]; struct ice_vsi *vsi; int status = 0; @@ -1926,38 +2019,12 @@ static int ice_setup_pf_sw(struct ice_pf *pf) */ ice_napi_add(vsi); - /* To add a MAC filter, first add the MAC to a list and then - * pass the list to ice_add_mac. 
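The comment being deleted here survives in spirit in ice_init_mac_fltr() further up: filter programming in this driver is list-based, so even a single address goes through a temporary list that is built, handed to ice_add_mac() in one shot, and then freed. Stripped to a skeleton (kernel-style sketch with error unwinding elided; ice_init_mac_fltr() above is the real flow):

    LIST_HEAD(tmp_add_list);

    /* queue up one or more addresses on the local list... */
    status = ice_add_mac_to_list(vsi, &tmp_add_list,
                                 vsi->port_info->mac.perm_addr);
    if (!status)
            /* ...then program the whole list in a single call */
            status = ice_add_mac(&pf->hw, &tmp_add_list);

    /* the temporary list is always torn down afterwards */
    ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
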
- */ - - /* Add a unicast MAC filter so the VSI can get its packets */ - status = ice_add_mac_to_list(vsi, &tmp_add_list, - vsi->port_info->mac.perm_addr); + status = ice_init_mac_fltr(pf); if (status) goto unroll_napi_add; - /* VSI needs to receive broadcast traffic, so add the broadcast - * MAC address to the list as well. - */ - eth_broadcast_addr(broadcast); - status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast); - if (status) - goto free_mac_list; - - /* program MAC filters for entries in tmp_add_list */ - status = ice_add_mac(&pf->hw, &tmp_add_list); - if (status) { - dev_err(&pf->pdev->dev, "Could not add MAC filters\n"); - status = -ENOMEM; - goto free_mac_list; - } - - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); return status; -free_mac_list: - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); - unroll_napi_add: if (vsi) { ice_napi_del(vsi); @@ -2149,14 +2216,9 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf) if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) ice_dis_msix(pf); - if (pf->sw_irq_tracker) { - devm_kfree(&pf->pdev->dev, pf->sw_irq_tracker); - pf->sw_irq_tracker = NULL; - } - - if (pf->hw_irq_tracker) { - devm_kfree(&pf->pdev->dev, pf->hw_irq_tracker); - pf->hw_irq_tracker = NULL; + if (pf->irq_tracker) { + devm_kfree(&pf->pdev->dev, pf->irq_tracker); + pf->irq_tracker = NULL; } } @@ -2166,7 +2228,7 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf) */ static int ice_init_interrupt_scheme(struct ice_pf *pf) { - int vectors = 0, hw_vectors = 0; + int vectors; if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) vectors = ice_ena_msix_range(pf); @@ -2177,31 +2239,18 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) return vectors; /* set up vector assignment tracking */ - pf->sw_irq_tracker = - devm_kzalloc(&pf->pdev->dev, sizeof(*pf->sw_irq_tracker) + + pf->irq_tracker = + devm_kzalloc(&pf->pdev->dev, sizeof(*pf->irq_tracker) + (sizeof(u16) * vectors), GFP_KERNEL); - if (!pf->sw_irq_tracker) { + if (!pf->irq_tracker) { ice_dis_msix(pf); return -ENOMEM; } /* populate SW interrupts pool with number of OS granted IRQs. */ pf->num_avail_sw_msix = vectors; - pf->sw_irq_tracker->num_entries = vectors; - - /* set up HW vector assignment tracking */ - hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; - pf->hw_irq_tracker = - devm_kzalloc(&pf->pdev->dev, sizeof(*pf->hw_irq_tracker) + - (sizeof(u16) * hw_vectors), GFP_KERNEL); - if (!pf->hw_irq_tracker) { - ice_clear_interrupt_scheme(pf); - return -ENOMEM; - } - - /* populate HW interrupts pool with number of HW supported irqs. 
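None of the hunks here show the tracker declaration that makes the single-pool scheme work, but its shape can be inferred from how it is used in this patch (a sketch, not the verbatim definition from ice.h; search_hint is gone and end is new):

    struct ice_res_tracker {
            u16 num_entries; /* slots backed by OS-granted MSIX vectors */
            u16 end;         /* first index ice_search_res() must not use;
                              * starts equal to num_entries and is lowered
                              * when SR-IOV carves vectors off the tail
                              */
            u16 list[];      /* per-slot owner ID, ICE_RES_VALID_BIT when taken */
    };
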
*/ - pf->num_avail_hw_msix = hw_vectors; - pf->hw_irq_tracker->num_entries = hw_vectors; + pf->irq_tracker->num_entries = vectors; + pf->irq_tracker->end = pf->irq_tracker->num_entries; return 0; } @@ -2252,7 +2301,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) if (!pf) return -ENOMEM; - /* set up for high or low dma */ + /* set up for high or low DMA */ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (err) err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); @@ -2302,7 +2351,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) ice_init_pf(pf); - err = ice_init_pf_dcb(pf); + err = ice_init_pf_dcb(pf, false); if (err) { clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); clear_bit(ICE_FLAG_DCB_ENA, pf->flags); @@ -2368,7 +2417,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) err = ice_setup_pf_sw(pf); if (err) { - dev_err(dev, "probe failed due to setup pf switch:%d\n", err); + dev_err(dev, "probe failed due to setup PF switch:%d\n", err); goto err_alloc_sw_unroll; } @@ -2625,7 +2674,7 @@ static int __init ice_module_init(void) status = pci_register_driver(&ice_driver); if (status) { - pr_err("failed to register pci driver, err %d\n", status); + pr_err("failed to register PCI driver, err %d\n", status); destroy_workqueue(ice_wq); } @@ -2725,21 +2774,21 @@ free_lists: ice_free_fltr_list(&pf->pdev->dev, &a_mac_list); if (err) { - netdev_err(netdev, "can't set mac %pM. filter update failed\n", + netdev_err(netdev, "can't set MAC %pM. filter update failed\n", mac); return err; } /* change the netdev's MAC address */ memcpy(netdev->dev_addr, mac, netdev->addr_len); - netdev_dbg(vsi->netdev, "updated mac address to %pM\n", + netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", netdev->dev_addr); /* write new MAC address to the firmware */ flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; status = ice_aq_manage_mac_write(hw, mac, flags, NULL); if (status) { - netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n", + netdev_err(netdev, "can't set MAC %pM. 
write to firmware failed.\n", mac); } return 0; @@ -2876,6 +2925,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) (netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) ret = ice_vsi_manage_vlan_insertion(vsi); + if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && + !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) + ret = ice_cfg_vlan_pruning(vsi, true, false); + else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && + (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) + ret = ice_cfg_vlan_pruning(vsi, false, false); + return ret; } @@ -2901,7 +2957,7 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi) * * Return 0 on success and negative value on error */ -static int ice_vsi_cfg(struct ice_vsi *vsi) +int ice_vsi_cfg(struct ice_vsi *vsi) { int err; @@ -2933,7 +2989,7 @@ static void ice_napi_enable_all(struct ice_vsi *vsi) if (!vsi->netdev) return; - ice_for_each_q_vector(vsi, q_idx) { + ice_for_each_q_vector(vsi, q_idx) { struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; if (q_vector->rx.ring || q_vector->tx.ring) @@ -3456,7 +3512,7 @@ int ice_down(struct ice_vsi *vsi) * * Return 0 on success, negative on failure */ -static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) +int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) { int i, err = 0; @@ -3482,7 +3538,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) * * Return 0 on success, negative on failure */ -static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) +int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) { int i, err = 0; @@ -3658,7 +3714,7 @@ static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) } /** - * ice_vsi_rebuild_all - rebuild all VSIs in pf + * ice_vsi_rebuild_all - rebuild all VSIs in PF * @pf: the PF */ static int ice_vsi_rebuild_all(struct ice_pf *pf) @@ -3728,7 +3784,7 @@ static int ice_vsi_replay_all(struct ice_pf *pf) /** * ice_rebuild - rebuild after reset - * @pf: pf to rebuild + * @pf: PF to rebuild */ static void ice_rebuild(struct ice_pf *pf) { @@ -3740,7 +3796,7 @@ static void ice_rebuild(struct ice_pf *pf) if (test_bit(__ICE_DOWN, pf->state)) goto clear_recovery; - dev_dbg(dev, "rebuilding pf\n"); + dev_dbg(dev, "rebuilding PF\n"); ret = ice_init_all_ctrlq(hw); if (ret) { @@ -3768,12 +3824,6 @@ static void ice_rebuild(struct ice_pf *pf) ice_dcb_rebuild(pf); - /* reset search_hint of irq_trackers to 0 since interrupts are - * reclaimed and could be allocated from beginning during VSI rebuild - */ - pf->sw_irq_tracker->search_hint = 0; - pf->hw_irq_tracker->search_hint = 0; - err = ice_vsi_rebuild_all(pf); if (err) { dev_err(dev, "ice_vsi_rebuild_all failed\n"); @@ -3857,16 +3907,16 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) u8 count = 0; if (new_mtu == netdev->mtu) { - netdev_warn(netdev, "mtu is already %u\n", netdev->mtu); + netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); return 0; } if (new_mtu < netdev->min_mtu) { - netdev_err(netdev, "new mtu invalid. min_mtu is %d\n", + netdev_err(netdev, "new MTU invalid. min_mtu is %d\n", netdev->min_mtu); return -EINVAL; } else if (new_mtu > netdev->max_mtu) { - netdev_err(netdev, "new mtu invalid. max_mtu is %d\n", + netdev_err(netdev, "new MTU invalid. max_mtu is %d\n", netdev->min_mtu); return -EINVAL; } @@ -3882,7 +3932,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) } while (count < 100); if (count == 100) { - netdev_err(netdev, "can't change mtu. Device is busy\n"); + netdev_err(netdev, "can't change MTU. 
Device is busy\n"); return -EBUSY; } @@ -3894,18 +3944,18 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) err = ice_down(vsi); if (err) { - netdev_err(netdev, "change mtu if_up err %d\n", err); + netdev_err(netdev, "change MTU if_up err %d\n", err); return err; } err = ice_up(vsi); if (err) { - netdev_err(netdev, "change mtu if_up err %d\n", err); + netdev_err(netdev, "change MTU if_up err %d\n", err); return err; } } - netdev_dbg(netdev, "changed mtu to %d\n", new_mtu); + netdev_info(netdev, "changed MTU to %d\n", new_mtu); return 0; } @@ -4241,7 +4291,7 @@ static void ice_tx_timeout(struct net_device *netdev) * * Returns 0 on success, negative value on failure */ -static int ice_open(struct net_device *netdev) +int ice_open(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -4278,7 +4328,7 @@ static int ice_open(struct net_device *netdev) * * Returns success only - not allowed to fail */ -static int ice_stop(struct net_device *netdev) +int ice_stop(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 62571d33d0d6..bcb431f1bd92 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -119,7 +119,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) status = ice_read_sr_aq(hw, offset, 1, data, true); if (!status) - *data = le16_to_cpu(*(__le16 *)data); + *data = le16_to_cpu(*(__force __le16 *)data); return status; } @@ -174,7 +174,7 @@ ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data) } while (words_read < *words); for (i = 0; i < *words; i++) - data[i] = le16_to_cpu(((__le16 *)data)[i]); + data[i] = le16_to_cpu(((__force __le16 *)data)[i]); read_nvm_buf_aq_exit: *words = words_read; @@ -316,3 +316,34 @@ ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data) return status; } + +/** + * ice_nvm_validate_checksum + * @hw: pointer to the HW struct + * + * Verify NVM PFA checksum validity (0x0706) + */ +enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) +{ + struct ice_aqc_nvm_checksum *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + status = ice_acquire_nvm(hw, ICE_RES_READ); + if (status) + return status; + + cmd = &desc.params.nvm_checksum; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum); + cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + ice_release_nvm(hw); + + if (!status) + if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT) + status = ICE_ERR_NVM_CHECKSUM; + + return status; +} diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 8d49f83be7a5..2a232504379d 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -683,10 +683,10 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, u16 i, num_groups_added = 0; enum ice_status status = 0; struct ice_hw *hw = pi->hw; - u16 buf_size; + size_t buf_size; u32 teid; - buf_size = sizeof(*buf) + sizeof(*buf->generic) * (num_nodes - 1); + buf_size = struct_size(buf, generic, num_nodes - 1); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) return ICE_ERR_NO_MEMORY; diff --git a/drivers/net/ethernet/intel/ice/ice_status.h 
b/drivers/net/ethernet/intel/ice/ice_status.h index 17afe6acb18a..c01597885629 100644 --- a/drivers/net/ethernet/intel/ice/ice_status.h +++ b/drivers/net/ethernet/intel/ice/ice_status.h @@ -26,6 +26,7 @@ enum ice_status { ICE_ERR_IN_USE = -16, ICE_ERR_MAX_LIMIT = -17, ICE_ERR_RESET_ONGOING = -18, + ICE_ERR_NVM_CHECKSUM = -51, ICE_ERR_BUF_TOO_SHORT = -52, ICE_ERR_NVM_BLANK_MODE = -53, ICE_ERR_AQ_ERROR = -100, diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 9f1f595ae7e6..8271fd651725 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -799,7 +799,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, daddr = f_info->l_data.ethertype_mac.mac_addr; /* fall-through */ case ICE_SW_LKUP_ETHERTYPE: - off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET); + off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET); *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype); break; case ICE_SW_LKUP_MAC_VLAN: @@ -829,7 +829,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr); if (!(vlan_id > ICE_MAX_VLAN_ID)) { - off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET); + off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET); *off = cpu_to_be16(vlan_id); } @@ -1973,6 +1973,10 @@ ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) * ice_add_eth_mac - Add ethertype and MAC based filter rule * @hw: pointer to the hardware structure * @em_list: list of ether type MAC filter, MAC is optional + * + * This function requires the caller to populate the entries in + * the filter list with the necessary fields (including flags to + * indicate Tx or Rx rules). */ enum ice_status ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) @@ -1990,7 +1994,6 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) l_type != ICE_SW_LKUP_ETHERTYPE) return ICE_ERR_PARAM; - em_list_itr->fltr_info.flag = ICE_FLTR_TX; em_list_itr->status = ice_add_rule_internal(hw, l_type, em_list_itr); if (em_list_itr->status) diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index 732b0b9b2e15..cb123fbe30be 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -8,9 +8,11 @@ #define ICE_SW_CFG_MAX_BUF_LEN 2048 #define ICE_DFLT_VSI_INVAL 0xff +#define ICE_FLTR_RX BIT(0) +#define ICE_FLTR_TX BIT(1) +#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX) #define ICE_VSI_INVAL_ID 0xffff #define ICE_INVAL_Q_HANDLE 0xFFFF -#define ICE_INVAL_Q_HANDLE 0xFFFF /* VSI queue context structure */ struct ice_q_ctx { @@ -69,9 +71,6 @@ struct ice_fltr_info { /* rule ID returned by firmware once filter rule is created */ u16 fltr_rule_id; u16 flag; -#define ICE_FLTR_RX BIT(0) -#define ICE_FLTR_TX BIT(1) -#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX) /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */ u16 src; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 2364eaf33d23..3c83230434b6 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -55,7 +55,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring) if (!tx_ring->tx_buf) return; - /* Free all the Tx ring sk_bufss */ + /* Free all the Tx ring sk_buffs */ for (i = 0; i < tx_ring->count; i++) ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); @@ -1101,7 +1101,7 
@@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic * @port_info: port_info structure containing the current link speed * @avg_pkt_size: average size of Tx or Rx packets based on clean routine - * @itr: itr value to update + * @itr: ITR value to update * * Calculate how big of an increment should be applied to the ITR value passed * in based on wmem_default, SKB overhead, Ethernet overhead, and the current @@ -1316,7 +1316,7 @@ clear_counts: */ static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) { - /* The itr value is reported in microseconds, and the register value is + /* The ITR value is reported in microseconds, and the register value is * recorded in 2 microsecond units. For this reason we only need to * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this * granularity as a shift instead of division. The mask makes sure the @@ -1645,7 +1645,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, return; dma_error: - /* clear dma mappings for failed tx_buf map */ + /* clear DMA mappings for failed tx_buf map */ for (;;) { tx_buf = &tx_ring->tx_buf[i]; ice_unmap_and_free_tx_buf(tx_ring, tx_buf); @@ -1874,10 +1874,10 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) cd_mss = skb_shinfo(skb)->gso_size; /* record cdesc_qw1 with TSO parameters */ - off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX | - (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) | - (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) | - (cd_mss << ICE_TXD_CTX_QW1_MSS_S); + off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | + (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) | + (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) | + (cd_mss << ICE_TXD_CTX_QW1_MSS_S)); first->tx_flags |= ICE_TX_FLAGS_TSO; return 1; } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 66e05032ee56..ec76aba347b9 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -58,19 +58,19 @@ struct ice_tx_buf { unsigned int bytecount; unsigned short gso_segs; u32 tx_flags; - DEFINE_DMA_UNMAP_ADDR(dma); DEFINE_DMA_UNMAP_LEN(len); + DEFINE_DMA_UNMAP_ADDR(dma); }; struct ice_tx_offload_params { - u8 header_len; + u64 cd_qw1; + struct ice_ring *tx_ring; u32 td_cmd; u32 td_offset; u32 td_l2tag1; - u16 cd_l2tag2; u32 cd_tunnel_params; - u64 cd_qw1; - struct ice_ring *tx_ring; + u16 cd_l2tag2; + u8 header_len; }; struct ice_rx_buf { @@ -150,6 +150,7 @@ enum ice_rx_dtype { /* descriptor ring, associated with a VSI */ struct ice_ring { + /* CL1 - 1st cacheline starts here */ struct ice_ring *next; /* pointer to next ring in q_vector */ void *desc; /* Descriptor ring memory */ struct device *dev; /* Used for DMA mapping */ @@ -161,11 +162,11 @@ struct ice_ring { struct ice_tx_buf *tx_buf; struct ice_rx_buf *rx_buf; }; + /* CL2 - 2nd cacheline starts here */ u16 q_index; /* Queue number of ring */ - u32 txq_teid; /* Added Tx queue TEID */ -#ifdef CONFIG_DCB - u8 dcb_tc; /* Traffic class of ring */ -#endif /* CONFIG_DCB */ + u16 q_handle; /* Queue handle per TC */ + + u8 ring_active:1; /* is ring online or not */ u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ @@ -173,8 +174,7 @@ struct ice_ring { /* used in interrupt processing */ u16 next_to_use; u16 next_to_clean; - - u8 ring_active; /* is ring online or not */ + u16 next_to_alloc; /* stats structs */ struct ice_q_stats stats; @@ -184,10 +184,17 @@ struct ice_ring { 
struct ice_rxq_stats rx_stats; }; - unsigned int size; /* length of descriptor ring in bytes */ - dma_addr_t dma; /* physical address of ring */ struct rcu_head rcu; /* to avoid race on free */ - u16 next_to_alloc; + /* CLX - the below items are only accessed infrequently and should be + * in their own cache line if possible + */ + dma_addr_t dma; /* physical address of ring */ + unsigned int size; /* length of descriptor ring in bytes */ + u32 txq_teid; /* Added Tx queue TEID */ + u16 rx_buf_len; +#ifdef CONFIG_DCB + u8 dcb_tc; /* Traffic class of ring */ +#endif /* CONFIG_DCB */ } ____cacheline_internodealigned_in_smp; struct ice_ring_container { diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index a862af4cbf78..24bbef8bbe69 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -23,6 +23,7 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc) /* debug masks - set these bits in hw->debug_mask to control output */ #define ICE_DBG_INIT BIT_ULL(1) +#define ICE_DBG_FW_LOG BIT_ULL(3) #define ICE_DBG_LINK BIT_ULL(4) #define ICE_DBG_PHY BIT_ULL(5) #define ICE_DBG_QCTX BIT_ULL(6) @@ -61,6 +62,13 @@ enum ice_fc_mode { ICE_FC_DFLT }; +enum ice_fec_mode { + ICE_FEC_NONE = 0, + ICE_FEC_RS, + ICE_FEC_BASER, + ICE_FEC_AUTO +}; + enum ice_set_fc_aq_failures { ICE_SET_FC_AQ_FAIL_NONE = 0, ICE_SET_FC_AQ_FAIL_GET, @@ -86,12 +94,14 @@ enum ice_media_type { enum ice_vsi_type { ICE_VSI_PF = 0, ICE_VSI_VF, + ICE_VSI_LB = 6, }; struct ice_link_status { /* Refer to ice_aq_phy_type for bits definition */ u64 phy_type_low; u64 phy_type_high; + u8 topo_media_conflict; u16 max_frame_size; u16 link_speed; u16 req_speeds; @@ -99,6 +109,7 @@ struct ice_link_status { u8 link_info; u8 an_info; u8 ext_info; + u8 fec_info; u8 pacing; /* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of * ice_aqc_get_phy_caps structure @@ -423,7 +434,7 @@ struct ice_hw { struct ice_fw_log_cfg fw_log; /* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL - * register. Used for determining the itr/intrl granularity during + * register. Used for determining the ITR/intrl granularity during * initialization. */ #define ICE_MAX_AGG_BW_200G 0x0 diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index a805cbdd69be..5d24b539648f 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -103,7 +103,7 @@ ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe, u16 link_speed; if (link_up) - link_speed = ICE_AQ_LINK_SPEED_40GB; + link_speed = ICE_AQ_LINK_SPEED_100GB; else link_speed = ICE_AQ_LINK_SPEED_UNKNOWN; @@ -141,32 +141,20 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf) } /** - * ice_get_vf_vector - get VF interrupt vector register offset - * @vf_msix: number of MSIx vector per VF on a PF - * @vf_id: VF identifier - * @i: index of MSIx vector - */ -static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i) -{ - return ((i == 0) ? VFINT_DYN_CTLN(vf_id) : - VFINT_DYN_CTLN(((vf_msix - 1) * (vf_id)) + (i - 1))); -} - -/** * ice_free_vf_res - Free a VF's resources * @vf: pointer to the VF info */ static void ice_free_vf_res(struct ice_vf *vf) { struct ice_pf *pf = vf->pf; - int i, pf_vf_msix; + int i, last_vector_idx; /* First, disable VF's configuration API to prevent OS from * accessing the VF's VSI after it's freed or invalidated. 
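Back on the ice_ring reshuffle a few hunks up: the CL1/CL2 annotations are doing real work. The pointers and indices touched on every descriptor stay in the first two cachelines, while setup-time fields (dma, size, txq_teid, rx_buf_len, dcb_tc) move behind the rcu head into a cold tail. When making this kind of change it is cheap to pin the layout down with offsetof() and a static assert; a standalone illustration on a toy struct, not the real ice_ring:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* toy stand-in for a hot/cold split descriptor ring */
    struct demo_ring {
            /* CL1 - pointers chased on every descriptor */
            struct demo_ring *next;
            void *desc;
            void *dev;
            void *netdev;
            void *vsi;
            void *q_vector;
            /* CL2 - indices used in interrupt processing */
            uint16_t q_index;
            uint16_t next_to_use;
            uint16_t next_to_clean;
            /* cold tail - configuration-time only */
            uint64_t dma;
            unsigned int size;
    };

    /* fail the build if a hot index ever drifts past the 2nd cacheline */
    _Static_assert(offsetof(struct demo_ring, next_to_clean) < 128,
                   "hot ring fields must stay within CL1/CL2");

    int main(void)
    {
            printf("next_to_use @ %zu, dma @ %zu\n",
                   offsetof(struct demo_ring, next_to_use),
                   offsetof(struct demo_ring, dma));
            return 0;
    }
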
*/ clear_bit(ICE_VF_STATE_INIT, vf->vf_states); - /* free vsi & disconnect it from the parent uplink */ + /* free VSI and disconnect it from the parent uplink */ if (vf->lan_vsi_idx) { ice_vsi_release(pf->vsi[vf->lan_vsi_idx]); vf->lan_vsi_idx = 0; @@ -174,13 +162,10 @@ static void ice_free_vf_res(struct ice_vf *vf) vf->num_mac = 0; } - pf_vf_msix = pf->num_vf_msix; + last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1; /* Disable interrupts so that VF starts in a known state */ - for (i = 0; i < pf_vf_msix; i++) { - u32 reg_idx; - - reg_idx = ice_get_vf_vector(pf_vf_msix, vf->vf_id, i); - wr32(&pf->hw, reg_idx, VFINT_DYN_CTLN_CLEARPBA_M); + for (i = vf->first_vector_idx; i <= last_vector_idx; i++) { + wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M); ice_flush(&pf->hw); } /* reset some of the state variables keeping track of the resources */ @@ -205,8 +190,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) wr32(hw, VPINT_ALLOC(vf->vf_id), 0); wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0); - first = vf->first_vector_idx + - hw->func_caps.common_cap.msix_vector_first_id; + first = vf->first_vector_idx; last = first + pf->num_vf_msix - 1; for (v = first; v <= last; v++) { u32 reg; @@ -232,6 +216,42 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) } /** + * ice_sriov_free_msix_res - Reset/free any used MSIX resources + * @pf: pointer to the PF structure + * + * If MSIX entries from the pf->irq_tracker were needed then we need to + * reset the irq_tracker->end and give back the entries we needed to + * num_avail_sw_msix. + * + * If no MSIX entries were taken from the pf->irq_tracker then just clear + * the pf->sriov_base_vector. + * + * Returns 0 on success, and -EINVAL on error. + */ +static int ice_sriov_free_msix_res(struct ice_pf *pf) +{ + struct ice_res_tracker *res; + + if (!pf) + return -EINVAL; + + res = pf->irq_tracker; + if (!res) + return -EINVAL; + + /* give back irq_tracker resources used */ + if (pf->sriov_base_vector < res->num_entries) { + res->end = res->num_entries; + pf->num_avail_sw_msix += + res->num_entries - pf->sriov_base_vector; + } + + pf->sriov_base_vector = 0; + + return 0; +} + +/** * ice_free_vfs - Free all VFs * @pf: pointer to the PF structure */ @@ -246,15 +266,6 @@ void ice_free_vfs(struct ice_pf *pf) while (test_and_set_bit(__ICE_VF_DIS, pf->state)) usleep_range(1000, 2000); - /* Disable IOV before freeing resources. This lets any VF drivers - * running in the host get themselves cleaned up before we yank - * the carpet out from underneath their feet. - */ - if (!pci_vfs_assigned(pf->pdev)) - pci_disable_sriov(pf->pdev); - else - dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); - /* Avoid wait time by stopping all VFs at the same time */ for (i = 0; i < pf->num_alloc_vfs; i++) { struct ice_vsi *vsi; @@ -270,6 +281,15 @@ void ice_free_vfs(struct ice_pf *pf) clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states); } + /* Disable IOV before freeing resources. This lets any VF drivers + * running in the host get themselves cleaned up before we yank + * the carpet out from underneath their feet. 
+ */ + if (!pci_vfs_assigned(pf->pdev)) + pci_disable_sriov(pf->pdev); + else + dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); + tmp = pf->num_alloc_vfs; pf->num_vf_qps = 0; pf->num_alloc_vfs = 0; @@ -288,6 +308,10 @@ void ice_free_vfs(struct ice_pf *pf) } } + if (ice_sriov_free_msix_res(pf)) + dev_err(&pf->pdev->dev, + "Failed to free MSIX resources used by SR-IOV\n"); + devm_kfree(&pf->pdev->dev, pf->vf); pf->vf = NULL; @@ -457,6 +481,22 @@ ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id) } /** + * ice_calc_vf_first_vector_idx - Calculate absolute MSIX vector index in HW + * @pf: pointer to PF structure + * @vf: pointer to VF that the first MSIX vector index is being calculated for + * + * This returns the first MSIX vector index in HW that is used by this VF and + * this will always be the OICR index in the AVF driver so any functionality + * using vf->first_vector_idx for queue configuration will have to increment by + * 1 to avoid meddling with the OICR index. + */ +static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) +{ + return pf->hw.func_caps.common_cap.msix_vector_first_id + + pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix; +} + +/** * ice_alloc_vsi_res - Setup VF VSI and its resources * @vf: pointer to the VF structure * @@ -470,8 +510,10 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) struct ice_vsi *vsi; int status = 0; - vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id); + /* first vector index is the VFs OICR index */ + vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); + vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id); if (!vsi) { dev_err(&pf->pdev->dev, "Failed to create VF VSI\n"); return -ENOMEM; @@ -480,14 +522,6 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) vf->lan_vsi_idx = vsi->idx; vf->lan_vsi_num = vsi->vsi_num; - /* first vector index is the VFs OICR index */ - vf->first_vector_idx = vsi->hw_base_vector; - /* Since hw_base_vector holds the vector where data queue interrupts - * starts, increment by 1 since VFs allocated vectors include OICR intr - * as well. - */ - vsi->hw_base_vector += 1; - /* Check if port VLAN exist before, and restore it accordingly */ if (vf->port_vlan_id) { ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true); @@ -580,8 +614,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) hw = &pf->hw; vsi = pf->vsi[vf->lan_vsi_idx]; - first = vf->first_vector_idx + - hw->func_caps.common_cap.msix_vector_first_id; + first = vf->first_vector_idx; last = (first + pf->num_vf_msix) - 1; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; @@ -687,6 +720,97 @@ ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res) } /** + * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space + * @vf: VF to calculate the register index for + * @q_vector: a q_vector associated to the VF + */ +int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) +{ + struct ice_pf *pf; + + if (!vf || !q_vector) + return -EINVAL; + + pf = vf->pf; + + /* always add one to account for the OICR being the first MSIX */ + return pf->sriov_base_vector + pf->num_vf_msix * vf->vf_id + + q_vector->v_idx + 1; +} + +/** + * ice_get_max_valid_res_idx - Get the max valid resource index + * @res: pointer to the resource to find the max valid index for + * + * Start from the end of the ice_res_tracker and return right when we find the + * first res->list entry with the ICE_RES_VALID_BIT set. 
This function is only + * valid for SR-IOV because it is the only consumer that manipulates the + * res->end and this is always called when res->end is set to res->num_entries. + */ +static int ice_get_max_valid_res_idx(struct ice_res_tracker *res) +{ + int i; + + if (!res) + return -EINVAL; + + for (i = res->num_entries - 1; i >= 0; i--) + if (res->list[i] & ICE_RES_VALID_BIT) + return i; + + return 0; +} + +/** + * ice_sriov_set_msix_res - Set any used MSIX resources + * @pf: pointer to PF structure + * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs + * + * This function allows SR-IOV resources to be taken from the end of the PF's + * allowed HW MSIX vectors so in many cases the irq_tracker will not + * be needed. In these cases we just set the pf->sriov_base_vector and return + * success. + * + * If SR-IOV needs to use any pf->irq_tracker entries it updates the + * irq_tracker->end based on the first entry needed for SR-IOV. This makes it + * so any calls to ice_get_res() using the irq_tracker will not try to use + * resources at or beyond the newly set value. + * + * Return 0 on success, and -EINVAL when there are not enough MSIX vectors + * in the PF's space available for SR-IOV. + */ +static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) +{ + int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); + u16 pf_total_msix_vectors = + pf->hw.func_caps.common_cap.num_msix_vectors; + struct ice_res_tracker *res = pf->irq_tracker; + int sriov_base_vector; + + if (max_valid_res_idx < 0) + return max_valid_res_idx; + + sriov_base_vector = pf_total_msix_vectors - num_msix_needed; + + /* make sure we only grab irq_tracker entries from the list end and + * that we have enough available MSIX vectors + */ + if (sriov_base_vector <= max_valid_res_idx) + return -EINVAL; + + pf->sriov_base_vector = sriov_base_vector; + + /* dip into irq_tracker entries and update used resources */ + if (num_msix_needed > (pf_total_msix_vectors - res->num_entries)) { + pf->num_avail_sw_msix -= + res->num_entries - pf->sriov_base_vector; + res->end = pf->sriov_base_vector; + } + + return 0; +} + +/** * ice_check_avail_res - check if vectors and queues are available * @pf: pointer to the PF structure * @@ -696,11 +820,16 @@ ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res) */ static int ice_check_avail_res(struct ice_pf *pf) { - u16 num_msix, num_txq, num_rxq; + int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); + u16 num_msix, num_txq, num_rxq, num_avail_msix; - if (!pf->num_alloc_vfs) + if (!pf->num_alloc_vfs || max_valid_res_idx < 0) return -EINVAL; + /* add 1 to max_valid_res_idx to account for it being 0-based */ + num_avail_msix = pf->hw.func_caps.common_cap.num_msix_vectors - + (max_valid_res_idx + 1); + /* Grab from HW interrupts common pool * Note: By the time the user decides it needs more vectors in a VF * it's already too late since one must decide this prior to creating the @@ -717,11 +846,11 @@ static int ice_check_avail_res(struct ice_pf *pf) * grab default interrupt vectors (5 as supported by AVF driver). 
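The carve-out above is easiest to see with numbers. The walk-through below redoes ice_sriov_set_msix_res()'s arithmetic with invented example values (num_msix_vectors standing in for the PF's total HW MSIX space, num_entries for the smaller OS-granted irq_tracker pool, and msix_vector_first_id taken as 0 so that ice_calc_vf_first_vector_idx() reduces to base + vf_id * per-VF vectors):

    #include <stdio.h>

    int main(void)
    {
            unsigned int pf_total_msix = 1024; /* example func_caps value */
            unsigned int num_entries = 256;    /* example irq_tracker size */
            unsigned int max_valid_idx = 12;   /* highest in-use tracker slot */
            unsigned int num_vfs = 8, msix_per_vf = 5;
            unsigned int needed = num_vfs * msix_per_vf;             /* 40 */
            unsigned int sriov_base_vector = pf_total_msix - needed; /* 984 */

            if (sriov_base_vector <= max_valid_idx) {
                    puts("-EINVAL: tail of the MSIX space already in use");
                    return 1;
            }

            /* res->end only shrinks when the carve-out dips below the
             * vectors the irq_tracker actually owns
             */
            if (needed > pf_total_msix - num_entries)
                    printf("res->end: %u -> %u\n", num_entries,
                           sriov_base_vector);
            else
                    printf("tracker untouched, sriov_base_vector = %u\n",
                           sriov_base_vector);

            /* each VF's first absolute vector then falls out directly */
            for (unsigned int vf_id = 0; vf_id < 2; vf_id++)
                    printf("VF %u first_vector_idx = %u\n",
                           vf_id, sriov_base_vector + vf_id * msix_per_vf);
            return 0;
    }
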
*/ if (pf->num_alloc_vfs <= 16) { - num_msix = ice_determine_res(pf, pf->num_avail_hw_msix, + num_msix = ice_determine_res(pf, num_avail_msix, ICE_MAX_INTR_PER_VF, ICE_MIN_INTR_PER_VF); } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) { - num_msix = ice_determine_res(pf, pf->num_avail_hw_msix, + num_msix = ice_determine_res(pf, num_avail_msix, ICE_DFLT_INTR_PER_VF, ICE_MIN_INTR_PER_VF); } else { @@ -750,6 +879,9 @@ static int ice_check_avail_res(struct ice_pf *pf) if (!num_txq || !num_rxq) return -EIO; + if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs)) + return -EINVAL; + /* since AVF driver works with only queue pairs which means, it expects * to have equal number of Rx and Tx queues, so take the minimum of * available Tx or Rx queues @@ -938,6 +1070,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) vf->num_vf_qs = 0; } + if (ice_sriov_free_msix_res(pf)) + dev_err(&pf->pdev->dev, + "Failed to free MSIX resources used by SR-IOV\n"); + if (ice_check_avail_res(pf)) { dev_err(&pf->pdev->dev, "Cannot allocate VF resources, try with fewer number of VFs\n"); @@ -1119,7 +1255,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) int i, ret; /* Disable global interrupt 0 so we don't try to handle the VFLR. */ - wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx), + wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); ice_flush(hw); @@ -1134,7 +1270,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) GFP_KERNEL); if (!vfs) { ret = -ENOMEM; - goto err_unroll_sriov; + goto err_pci_disable_sriov; } pf->vf = vfs; @@ -1154,12 +1290,19 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) pf->num_alloc_vfs = num_alloc_vfs; /* VF resources get allocated during reset */ - if (!ice_reset_all_vfs(pf, true)) + if (!ice_reset_all_vfs(pf, true)) { + ret = -EIO; goto err_unroll_sriov; + } goto err_unroll_intr; err_unroll_sriov: + pf->vf = NULL; + devm_kfree(&pf->pdev->dev, vfs); + vfs = NULL; + pf->num_alloc_vfs = 0; +err_pci_disable_sriov: pci_disable_sriov(pf->pdev); err_unroll_intr: /* rearm interrupts here */ @@ -1168,8 +1311,8 @@ err_unroll_intr: } /** - * ice_pf_state_is_nominal - checks the pf for nominal state - * @pf: pointer to pf to check + * ice_pf_state_is_nominal - checks the PF for nominal state + * @pf: pointer to PF to check * * Check the PF's state for a collection of bits that would indicate * the PF is in a state that would inhibit normal operation for @@ -1496,7 +1639,7 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf) /** * ice_find_vsi_from_id - * @pf: the pf structure to search for the VSI + * @pf: the PF structure to search for the VSI * @id: ID of the VSI it is searching for * * searches for the VSI with the given ID @@ -1807,28 +1950,37 @@ error_param: static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) { enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; - struct virtchnl_irq_map_info *irqmap_info = - (struct virtchnl_irq_map_info *)msg; + struct virtchnl_irq_map_info *irqmap_info; u16 vsi_id, vsi_q_id, vector_id; struct virtchnl_vector_map *map; - struct ice_vsi *vsi = NULL; struct ice_pf *pf = vf->pf; + u16 num_q_vectors_mapped; + struct ice_vsi *vsi; unsigned long qmap; - u16 num_q_vectors; int i; - num_q_vectors = irqmap_info->num_vectors - ICE_NONQ_VECS_VF; + irqmap_info = (struct virtchnl_irq_map_info *)msg; + num_q_vectors_mapped = irqmap_info->num_vectors; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + /* 
Check to make sure number of VF vectors mapped is not greater than + * number of VF vectors originally allocated, and check that + * there is actually at least a single VF queue vector mapped + */ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || - !vsi || vsi->num_q_vectors < num_q_vectors || - irqmap_info->num_vectors == 0) { + pf->num_vf_msix < num_q_vectors_mapped || + !irqmap_info->num_vectors) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - for (i = 0; i < num_q_vectors; i++) { - struct ice_q_vector *q_vector = vsi->q_vectors[i]; + for (i = 0; i < num_q_vectors_mapped; i++) { + struct ice_q_vector *q_vector; map = &irqmap_info->vecmap[i]; @@ -1836,7 +1988,21 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) vsi_id = map->vsi_id; /* validate msg params */ if (!(vector_id < pf->hw.func_caps.common_cap - .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) { + .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id) || + (!vector_id && (map->rxq_map || map->txq_map))) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* No need to map VF miscellaneous or rogue vector */ + if (!vector_id) + continue; + + /* Subtract non queue vector from vector_id passed by VF + * to get actual number of VSI queue vector array index + */ + q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF]; + if (!q_vector) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1852,6 +2018,8 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) q_vector->num_ring_rx++; q_vector->rx.itr_idx = map->rxitr_idx; vsi->rx_rings[vsi_q_id]->q_vector = q_vector; + ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id, + q_vector->rx.itr_idx); } qmap = map->txq_map; @@ -1864,11 +2032,11 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) q_vector->num_ring_tx++; q_vector->tx.itr_idx = map->txitr_idx; vsi->tx_rings[vsi_q_id]->q_vector = q_vector; + ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id, + q_vector->tx.itr_idx); } } - if (vsi) - ice_vsi_cfg_msix(vsi); error_param: /* send the response to the VF */ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret, @@ -1903,9 +2071,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) } vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) { + if (!vsi) goto error_param; - } if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) { dev_err(&pf->pdev->dev, diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 3725aea16840..c3ca522c245a 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -49,29 +49,34 @@ struct ice_vf { struct ice_pf *pf; s16 vf_id; /* VF ID in the PF space */ - u32 driver_caps; /* reported by VF driver */ + u16 lan_vsi_idx; /* index into PF struct */ int first_vector_idx; /* first vector index of this VF */ struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */ struct virtchnl_version_info vf_ver; + u32 driver_caps; /* reported by VF driver */ struct virtchnl_ether_addr dflt_lan_addr; u16 port_vlan_id; - u8 pf_set_mac; /* VF MAC address set by VMM admin */ - u8 trusted; - u16 lan_vsi_idx; /* index into PF struct */ + u8 pf_set_mac:1; /* VF MAC address set by VMM admin */ + u8 trusted:1; + u8 spoofchk:1; + u8 link_forced:1; + u8 link_up:1; /* only valid if VF link is forced */ + /* VSI indices - actual VSI pointers are maintained in the PF structure + * When assigned, these will be non-zero, because VSI 0 is always + * the main LAN VSI for the PF. 
+ */ u16 lan_vsi_num; /* ID as used by firmware */ + unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ + DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ + u64 num_mdd_events; /* number of MDD events detected */ u64 num_inval_msgs; /* number of continuous invalid msgs */ u64 num_valid_msgs; /* number of valid msgs detected */ unsigned long vf_caps; /* VF's adv. capabilities */ - DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ - unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ - u8 link_forced; - u8 link_up; /* only valid if VF link is forced */ - u8 spoofchk; + u8 num_req_qs; /* num of queue pairs requested by VF */ u16 num_mac; u16 num_vlan; u16 num_vf_qs; /* num of queue configured per VF */ - u8 num_req_qs; /* num of queue pairs requested by VF */ }; #ifdef CONFIG_PCI_IOV @@ -96,6 +101,8 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted); int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena); + +int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector); #else /* CONFIG_PCI_IOV */ #define ice_process_vflr_event(pf) do {} while (0) #define ice_free_vfs(pf) do {} while (0) @@ -161,5 +168,11 @@ ice_set_vf_link_state(struct net_device __always_unused *netdev, return -EOPNOTSUPP; } +static inline int +ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf, + struct ice_q_vector __always_unused *q_vector) +{ + return 0; +} #endif /* CONFIG_PCI_IOV */ #endif /* _ICE_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index bafdcf70a353..3ec2ce0725d5 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -638,7 +638,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) dev_spec->sgmii_active = true; break; } - /* fall through for I2C based SGMII */ + /* fall through - for I2C based SGMII */ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: /* read media type from SFP EEPROM */ ret_val = igb_set_sfp_media_type_82575(hw); diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 0ad737d2f289..9cb49980ec2d 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -409,6 +409,8 @@ do { \ #define E1000_I210_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) #define E1000_I210_TQAVHC(_n) (0x300C + ((_n) * 0x40)) +#define E1000_I210_RR2DCDELAY 0x5BF4 + #define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) #define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index c645d9e648e0..3182b059bf55 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -448,7 +448,7 @@ static void igb_set_msglevel(struct net_device *netdev, u32 data) static int igb_get_regs_len(struct net_device *netdev) { -#define IGB_REGS_LEN 739 +#define IGB_REGS_LEN 740 return IGB_REGS_LEN * sizeof(u32); } @@ -675,41 +675,44 @@ static void igb_get_regs(struct net_device *netdev, regs_buff[554] = adapter->stats.b2ogprc; } - if (hw->mac.type != e1000_82576) - return; - for (i = 0; i < 12; i++) - regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4)); - for (i = 0; i < 4; i++) - regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4)); - for (i = 0; i < 12; i++) - regs_buff[571 + i] = 
rd32(E1000_RDBAL(i + 4)); - for (i = 0; i < 12; i++) - regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4)); - for (i = 0; i < 12; i++) - regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4)); - for (i = 0; i < 12; i++) - regs_buff[607 + i] = rd32(E1000_RDH(i + 4)); - for (i = 0; i < 12; i++) - regs_buff[619 + i] = rd32(E1000_RDT(i + 4)); - for (i = 0; i < 12; i++) - regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4)); - - for (i = 0; i < 12; i++) - regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4)); - for (i = 0; i < 12; i++) - regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4)); - for (i = 0; i < 12; i++) - regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4)); - for (i = 0; i < 12; i++) - regs_buff[679 + i] = rd32(E1000_TDH(i + 4)); - for (i = 0; i < 12; i++) - regs_buff[691 + i] = rd32(E1000_TDT(i + 4)); - for (i = 0; i < 12; i++) - regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4)); - for (i = 0; i < 12; i++) - regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4)); - for (i = 0; i < 12; i++) - regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4)); + if (hw->mac.type == e1000_82576) { + for (i = 0; i < 12; i++) + regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4)); + for (i = 0; i < 4; i++) + regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[607 + i] = rd32(E1000_RDH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[619 + i] = rd32(E1000_RDT(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4)); + + for (i = 0; i < 12; i++) + regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[679 + i] = rd32(E1000_TDH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[691 + i] = rd32(E1000_TDT(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4)); + } + + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) + regs_buff[739] = rd32(E1000_I210_RR2DCDELAY); } static int igb_get_eeprom_len(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 39f33afc479c..b4df3e319467 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -753,6 +753,7 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg) struct net_device *netdev = igb->netdev; hw->hw_addr = NULL; netdev_err(netdev, "PCIe link lost\n"); + WARN(1, "igb: Failed to read reg 0x%x!\n", reg); } return value; @@ -2577,11 +2578,11 @@ static int igb_offload_cbs(struct igb_adapter *adapter, #define VLAN_PRIO_FULL_MASK (0x07) static int igb_parse_cls_flower(struct igb_adapter *adapter, - struct tc_cls_flower_offload *f, + struct flow_cls_offload *f, int traffic_class, struct igb_nfc_filter *input) { - struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_dissector *dissector = rule->match.dissector; struct netlink_ext_ack *extack = f->common.extack; @@ -2659,7 +2660,7 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter, } static int 
igb_configure_clsflower(struct igb_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) + struct flow_cls_offload *cls_flower) { struct netlink_ext_ack *extack = cls_flower->common.extack; struct igb_nfc_filter *filter, *f; @@ -2721,7 +2722,7 @@ err_parse: } static int igb_delete_clsflower(struct igb_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) + struct flow_cls_offload *cls_flower) { struct igb_nfc_filter *filter; int err; @@ -2751,14 +2752,14 @@ out: } static int igb_setup_tc_cls_flower(struct igb_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) + struct flow_cls_offload *cls_flower) { switch (cls_flower->command) { - case TC_CLSFLOWER_REPLACE: + case FLOW_CLS_REPLACE: return igb_configure_clsflower(adapter, cls_flower); - case TC_CLSFLOWER_DESTROY: + case FLOW_CLS_DESTROY: return igb_delete_clsflower(adapter, cls_flower); - case TC_CLSFLOWER_STATS: + case FLOW_CLS_STATS: return -EOPNOTSUPP; default: return -EOPNOTSUPP; @@ -2782,25 +2783,6 @@ static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } } -static int igb_setup_tc_block(struct igb_adapter *adapter, - struct tc_block_offload *f) -{ - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, igb_setup_tc_block_cb, - adapter, adapter, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb, - adapter); - return 0; - default: - return -EOPNOTSUPP; - } -} - static int igb_offload_txtime(struct igb_adapter *adapter, struct tc_etf_qopt_offload *qopt) { @@ -2824,6 +2806,8 @@ static int igb_offload_txtime(struct igb_adapter *adapter, return 0; } +static LIST_HEAD(igb_block_cb_list); + static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { @@ -2833,7 +2817,11 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, case TC_SETUP_QDISC_CBS: return igb_offload_cbs(adapter, type_data); case TC_SETUP_BLOCK: - return igb_setup_tc_block(adapter, type_data); + return flow_block_cb_setup_simple(type_data, + &igb_block_cb_list, + igb_setup_tc_block_cb, + adapter, adapter, true); + case TC_SETUP_QDISC_ETF: return igb_offload_txtime(adapter, type_data); @@ -5687,6 +5675,7 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, */ if (tx_ring->launchtime_enable) { ts = ns_to_timespec64(first->skb->tstamp); + first->skb->tstamp = 0; context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); } else { context_desc->seqnum_seed = 0; @@ -6695,7 +6684,7 @@ static int __igb_notify_dca(struct device *dev, void *data) igb_setup_dca(adapter); break; } - /* Fall Through since DCA is disabled. */ + /* Fall Through - since DCA is disabled. 
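	 * (The ADD case reaches this point only when dca_add_requester()
	 * failed, so IGB_FLAG_DCA_ENABLED is still clear and the removal
	 * body below is a no-op.)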
*/ case DCA_PROVIDER_REMOVE: if (adapter->flags & IGB_FLAG_DCA_ENABLED) { /* without this a class_device is left diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index 51a8b8769c67..59258d791106 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -10,50 +10,6 @@ #include "igc.h" /** - * igc_set_pcie_completion_timeout - set pci-e completion timeout - * @hw: pointer to the HW structure - */ -static s32 igc_set_pcie_completion_timeout(struct igc_hw *hw) -{ - u32 gcr = rd32(IGC_GCR); - u16 pcie_devctl2; - s32 ret_val = 0; - - /* only take action if timeout value is defaulted to 0 */ - if (gcr & IGC_GCR_CMPL_TMOUT_MASK) - goto out; - - /* if capabilities version is type 1 we can write the - * timeout of 10ms to 200ms through the GCR register - */ - if (!(gcr & IGC_GCR_CAP_VER2)) { - gcr |= IGC_GCR_CMPL_TMOUT_10ms; - goto out; - } - - /* for version 2 capabilities we need to write the config space - * directly in order to set the completion timeout value for - * 16ms to 55ms - */ - ret_val = igc_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, - &pcie_devctl2); - if (ret_val) - goto out; - - pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; - - ret_val = igc_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, - &pcie_devctl2); -out: - /* disable completion timeout resend */ - gcr &= ~IGC_GCR_CMPL_TMOUT_RESEND; - - wr32(IGC_GCR, gcr); - - return ret_val; -} - -/** * igc_reset_hw_base - Reset hardware * @hw: pointer to the HW structure * @@ -72,11 +28,6 @@ static s32 igc_reset_hw_base(struct igc_hw *hw) if (ret_val) hw_dbg("PCI-E Master disable polling has failed.\n"); - /* set the completion timeout for interface */ - ret_val = igc_set_pcie_completion_timeout(hw); - if (ret_val) - hw_dbg("PCI-E Set completion timeout has failed.\n"); - hw_dbg("Masking off all interrupts\n"); wr32(IGC_IMC, 0xffffffff); diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index a9a30268de59..fc0ccfe38a20 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -5,8 +5,8 @@ #define _IGC_DEFINES_H_ /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 #define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ @@ -29,12 +29,6 @@ /* Status of Master requests. */ #define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000 -/* PCI Express Control */ -#define IGC_GCR_CMPL_TMOUT_MASK 0x0000F000 -#define IGC_GCR_CMPL_TMOUT_10ms 0x00001000 -#define IGC_GCR_CMPL_TMOUT_RESEND 0x00010000 -#define IGC_GCR_CAP_VER2 0x00040000 - /* Receive Address * Number of high/low register pairs in the RAR. The RAR (Receive Address * Registers) holds the directed and multicast addresses that we monitor. 
@@ -72,6 +66,9 @@

 #define IGC_CONNSW_AUTOSENSE_EN	0x1

+/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
+#define MAX_JUMBO_FRAME_SIZE	0x2600
+
 /* PBA constants */
 #define IGC_PBA_34K	0x0022

@@ -264,9 +261,6 @@
 #define IGC_TCTL_RTLC	0x01000000 /* Re-transmit on late collision */
 #define IGC_TCTL_MULR	0x10000000 /* Multiple request support */

-#define IGC_CT_SHIFT		4
-#define IGC_COLLISION_THRESHOLD	15
-
 /* Flow Control Constants */
 #define FLOW_CONTROL_ADDRESS_LOW	0x00C28001
 #define FLOW_CONTROL_ADDRESS_HIGH	0x00000100
@@ -398,7 +392,7 @@
 #define IGC_MDIC_ERROR	0x40000000
 #define IGC_MDIC_DEST	0x80000000

-#define IGC_N0_QUEUE -1
+#define IGC_N0_QUEUE	-1

 #define IGC_MAX_MAC_HDR_LEN	127
 #define IGC_MAX_NETWORK_HDR_LEN	511
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 7c88b7bd4799..1039a224ac80 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -114,11 +114,8 @@ struct igc_nvm_operations {

 struct igc_phy_operations {
 	s32 (*acquire)(struct igc_hw *hw);
-	s32 (*check_polarity)(struct igc_hw *hw);
 	s32 (*check_reset_block)(struct igc_hw *hw);
 	s32 (*force_speed_duplex)(struct igc_hw *hw);
-	s32 (*get_cfg_done)(struct igc_hw *hw);
-	s32 (*get_cable_length)(struct igc_hw *hw);
 	s32 (*get_phy_info)(struct igc_hw *hw);
 	s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data);
 	void (*release)(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index f7683d3ae47c..ba4646737288 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -8,7 +8,6 @@
 #include "igc_hw.h"

 /* forward declaration */
-static s32 igc_set_default_fc(struct igc_hw *hw);
 static s32 igc_set_fc_watermarks(struct igc_hw *hw);

 /**
@@ -96,13 +95,10 @@ s32 igc_setup_link(struct igc_hw *hw)
 		goto out;

 	/* If requested flow control is set to default, set flow control
-	 * based on the EEPROM flow control settings.
+	 * to both 'rx' and 'tx' pause frames.
 	 */
-	if (hw->fc.requested_mode == igc_fc_default) {
-		ret_val = igc_set_default_fc(hw);
-		if (ret_val)
-			goto out;
-	}
+	if (hw->fc.requested_mode == igc_fc_default)
+		hw->fc.requested_mode = igc_fc_full;

 	/* We want to save off the original Flow Control configuration just
 	 * in case we get disconnected and then reconnected into a different
@@ -136,19 +132,6 @@ out:
 }

 /**
- * igc_set_default_fc - Set flow control default values
- * @hw: pointer to the HW structure
- *
- * Read the EEPROM for the default values for flow control and store the
- * values.
- */
-static s32 igc_set_default_fc(struct igc_hw *hw)
-{
-	hw->fc.requested_mode = igc_fc_full;
-	return 0;
-}
-
-/**
 * igc_force_mac_fc - Force the MAC's flow control settings
 * @hw: pointer to the HW structure
 *
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 34fa0e60a780..93f3b4e6185b 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -72,6 +72,27 @@ void igc_reset(struct igc_adapter *adapter)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	struct igc_hw *hw = &adapter->hw;
+	struct igc_fc_info *fc = &hw->fc;
+	u32 pba, hwm;
+
+	/* Repartition PBA for greater than 9k MTU if required */
+	pba = IGC_PBA_34K;
+
+	/* flow control settings
+	 * The high water mark must be low enough to fit one full frame
+	 * after transmitting the pause frame.
As such we must have enough + * space to allow for us to complete our current transmit and then + * receive the frame that is in progress from the link partner. + * Set it to: + * - the full Rx FIFO size minus one full Tx plus one full Rx frame + */ + hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); + + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + fc->pause_time = 0xFFFF; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; hw->mac.ops.reset_hw(hw); @@ -3934,6 +3955,7 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg) hw->hw_addr = NULL; netif_device_detach(netdev); netdev_err(netdev, "PCIe link lost, device now detached\n"); + WARN(1, "igc: Failed to read reg 0x%x!\n", reg); } return value; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 08d85e336bd4..39e73ad60352 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -50,8 +50,6 @@ #define IXGBE_MAX_RXD 4096 #define IXGBE_MIN_RXD 64 -#define IXGBE_ETH_P_LLDP 0x88CC - /* flow control */ #define IXGBE_MIN_FCRTL 0x40 #define IXGBE_MAX_FCRTL 0x7FF80 @@ -635,6 +633,7 @@ struct ixgbe_adapter { /* XDP */ int num_xdp_queues; struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES]; + unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */ /* TX */ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; @@ -774,11 +773,6 @@ struct ixgbe_adapter { #ifdef CONFIG_IXGBE_IPSEC struct ixgbe_ipsec *ipsec; #endif /* CONFIG_IXGBE_IPSEC */ - - /* AF_XDP zero-copy */ - struct xdp_umem **xsk_umems; - u16 num_xsk_umems_used; - u16 num_xsk_umems; }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) @@ -1039,4 +1033,10 @@ static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf) { return -EACCES; } #endif /* CONFIG_IXGBE_IPSEC */ + +static inline bool ixgbe_enabled_xdp_adapter(struct ixgbe_adapter *adapter) +{ + return !!adapter->xdp_prog; +} + #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index acba067cc15a..7c52ae8ac005 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -3226,7 +3226,8 @@ static int ixgbe_get_module_info(struct net_device *dev, page_swap = true; } - if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { + if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap || + !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) { /* We have a SFP, but it does not support SFF-8472 */ modinfo->type = ETH_MODULE_SFF_8079; modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index ff85ce5791a3..31629fc7e820 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -842,6 +842,9 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf) struct ixgbe_ipsec *ipsec = adapter->ipsec; int i; + if (!ipsec) + return; + /* search rx sa table */ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_rx_sa; i++) { if (!ipsec->rx_tbl[i].used) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 57fd9ee6de66..cbaf712d6529 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6288,6 +6288,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, if (ixgbe_init_rss_key(adapter)) return -ENOMEM; + adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL); + if (!adapter->af_xdp_zc_qps) + return -ENOMEM; + /* Set MAC specific capability flags and exceptions */ switch (hw->mac.type) { case ixgbe_mac_82598EB: @@ -9603,27 +9607,6 @@ static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } } -static int ixgbe_setup_tc_block(struct net_device *dev, - struct tc_block_offload *f) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb, - adapter, adapter, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb, - adapter); - return 0; - default: - return -EOPNOTSUPP; - } -} - static int ixgbe_setup_tc_mqprio(struct net_device *dev, struct tc_mqprio_qopt *mqprio) { @@ -9631,12 +9614,19 @@ static int ixgbe_setup_tc_mqprio(struct net_device *dev, return ixgbe_setup_tc(dev, mqprio->num_tc); } +static LIST_HEAD(ixgbe_block_cb_list); + static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { + struct ixgbe_adapter *adapter = netdev_priv(dev); + switch (type) { case TC_SETUP_BLOCK: - return ixgbe_setup_tc_block(dev, type_data); + return flow_block_cb_setup_simple(type_data, + &ixgbe_block_cb_list, + ixgbe_setup_tc_block_cb, + adapter, adapter, true); case TC_SETUP_QDISC_MQPRIO: return ixgbe_setup_tc_mqprio(dev, type_data); default: @@ -11161,6 +11151,7 @@ err_sw_init: kfree(adapter->jump_tables[0]); kfree(adapter->mac_table); kfree(adapter->rss_key); + bitmap_free(adapter->af_xdp_zc_qps); err_ioremap: disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); @@ -11249,6 +11240,7 @@ static void ixgbe_remove(struct pci_dev *pdev) kfree(adapter->mac_table); kfree(adapter->rss_key); + bitmap_free(adapter->af_xdp_zc_qps); disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 214b01085718..6544c4539c0d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -45,6 +45,7 @@ #define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 #define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 #define IXGBE_SFF_ADDRESSING_MODE 0x4 +#define IXGBE_SFF_DDM_IMPLEMENTED 0x40 #define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 #define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 #define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index d81a50dc9535..0be13a90ff79 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -72,13 +72,13 @@ #define IXGBE_INCPER_SHIFT_82599 24 #define IXGBE_OVERFLOW_PERIOD (HZ * 30) -#define IXGBE_PTP_TX_TIMEOUT (HZ * 15) +#define IXGBE_PTP_TX_TIMEOUT (HZ) -/* half of a one second clock period, for use with PPS signal. 
We have to use - * this instead of something pre-defined like IXGBE_PTP_PPS_HALF_SECOND, in - * order to force at least 64bits of precision for shifting +/* We use our own definitions instead of NSEC_PER_SEC because we want to mark + * the value as a ULL to force precision when bit shifting. */ -#define IXGBE_PTP_PPS_HALF_SECOND 500000000ULL +#define NS_PER_SEC 1000000000ULL +#define NS_PER_HALF_SEC 500000000ULL /* In contrast, the X550 controller has two registers, SYSTIMEH and SYSTIMEL * which contain measurements of seconds and nanoseconds respectively. This @@ -141,23 +141,26 @@ #define MAX_TIMADJ 0x7FFFFFFF /** - * ixgbe_ptp_setup_sdp_x540 + * ixgbe_ptp_setup_sdp_X540 * @adapter: private adapter structure * * this function enables or disables the clock out feature on SDP0 for - * the X540 device. It will create a 1second periodic output that can + * the X540 device. It will create a 1 second periodic output that can * be used as the PPS (via an interrupt). * - * It calculates when the systime will be on an exact second, and then - * aligns the start of the PPS signal to that value. The shift is - * necessary because it can change based on the link speed. + * It calculates when the system time will be on an exact second, and then + * aligns the start of the PPS signal to that value. + * + * This works by using the cycle counter shift and mult values in reverse, and + * assumes that the values we're shifting will not overflow. */ -static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter) +static void ixgbe_ptp_setup_sdp_X540(struct ixgbe_adapter *adapter) { + struct cyclecounter *cc = &adapter->hw_cc; struct ixgbe_hw *hw = &adapter->hw; - int shift = adapter->hw_cc.shift; u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem; - u64 ns = 0, clock_edge = 0; + u64 ns = 0, clock_edge = 0, clock_period; + unsigned long flags; /* disable the pin first */ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); @@ -177,26 +180,33 @@ static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter) /* enable the Clock Out feature on SDP0, and allow * interrupts to occur when the pin changes */ - tsauxc = IXGBE_TSAUXC_EN_CLK | - IXGBE_TSAUXC_SYNCLK | - IXGBE_TSAUXC_SDP0_INT; + tsauxc = (IXGBE_TSAUXC_EN_CLK | + IXGBE_TSAUXC_SYNCLK | + IXGBE_TSAUXC_SDP0_INT); - /* clock period (or pulse length) */ - clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift); - clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32); - - /* Account for the cyclecounter wrap-around value by - * using the converted ns value of the current time to - * check for when the next aligned second would occur. + /* Determine the clock time period to use. This assumes that the + * cycle counter shift is small enough to avoid overflow. 
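+	 * (The timecounter math is ns = (cycles * cc->mult) >> cc->shift,
+	 * so it is inverted here: cycles = (ns << cc->shift) / cc->mult.
+	 * Feeding in half a second of nanoseconds yields the pulse
+	 * half-period in SYSTIME units.)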
*/ - clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); - clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; - ns = timecounter_cyc2time(&adapter->hw_tc, clock_edge); + clock_period = div_u64((NS_PER_HALF_SEC << cc->shift), cc->mult); + clktiml = (u32)(clock_period); + clktimh = (u32)(clock_period >> 32); - div_u64_rem(ns, IXGBE_PTP_PPS_HALF_SECOND, &rem); - clock_edge += ((IXGBE_PTP_PPS_HALF_SECOND - (u64)rem) << shift); + /* Read the current clock time, and save the cycle counter value */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->hw_tc); + clock_edge = adapter->hw_tc.cycle_last; + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + /* Figure out how many seconds to add in order to round up */ + div_u64_rem(ns, NS_PER_SEC, &rem); + + /* Figure out how many nanoseconds to add to round the clock edge up + * to the next full second + */ + rem = (NS_PER_SEC - rem); - /* specify the initial clock start time */ + /* Adjust the clock edge to align with the next full second. */ + clock_edge += div_u64(((u64)rem << cc->shift), cc->mult); trgttiml = (u32)clock_edge; trgttimh = (u32)(clock_edge >> 32); @@ -212,8 +222,100 @@ static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter) } /** + * ixgbe_ptp_setup_sdp_X550 + * @adapter: private adapter structure + * + * Enable or disable a clock output signal on SDP 0 for X550 hardware. + * + * Use the target time feature to align the output signal on the next full + * second. + * + * This works by using the cycle counter shift and mult values in reverse, and + * assumes that the values we're shifting will not overflow. + */ +static void ixgbe_ptp_setup_sdp_X550(struct ixgbe_adapter *adapter) +{ + u32 esdp, tsauxc, freqout, trgttiml, trgttimh, rem, tssdp; + struct cyclecounter *cc = &adapter->hw_cc; + struct ixgbe_hw *hw = &adapter->hw; + u64 ns = 0, clock_edge = 0; + struct timespec64 ts; + unsigned long flags; + + /* disable the pin first */ + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); + IXGBE_WRITE_FLUSH(hw); + + if (!(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED)) + return; + + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + + /* enable the SDP0 pin as output, and connected to the + * native function for Timesync (ClockOut) + */ + esdp |= IXGBE_ESDP_SDP0_DIR | + IXGBE_ESDP_SDP0_NATIVE; + + /* enable the Clock Out feature on SDP0, and use Target Time 0 to + * enable generation of interrupts on the clock change. + */ +#define IXGBE_TSAUXC_DIS_TS_CLEAR 0x40000000 + tsauxc = (IXGBE_TSAUXC_EN_CLK | IXGBE_TSAUXC_ST0 | + IXGBE_TSAUXC_EN_TT0 | IXGBE_TSAUXC_SDP0_INT | + IXGBE_TSAUXC_DIS_TS_CLEAR); + + tssdp = (IXGBE_TSSDP_TS_SDP0_EN | + IXGBE_TSSDP_TS_SDP0_CLK0); + + /* Determine the clock time period to use. This assumes that the + * cycle counter shift is small enough to avoid overflowing a 32bit + * value. + */ + freqout = div_u64(NS_PER_HALF_SEC << cc->shift, cc->mult); + + /* Read the current clock time, and save the cycle counter value */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->hw_tc); + clock_edge = adapter->hw_tc.cycle_last; + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + /* Figure out how far past the next second we are */ + div_u64_rem(ns, NS_PER_SEC, &rem); + + /* Figure out how many nanoseconds to add to round the clock edge up + * to the next full second + */ + rem = (NS_PER_SEC - rem); + + /* Adjust the clock edge to align with the next full second. 
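+	 * (A worked example: if the timecounter reads 12.25 s worth of
+	 * nanoseconds, rem works out to 0.75 s, and the edge is advanced by
+	 * the cycle-counter equivalent of 0.75 s so the first pulse fires
+	 * exactly at the 13 s boundary.)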
*/ + clock_edge += div_u64(((u64)rem << cc->shift), cc->mult); + + /* X550 hardware stores the time in 32bits of 'billions of cycles' and + * 32bits of 'cycles'. There's no guarantee that cycles represents + * nanoseconds. However, we can use the math from a timespec64 to + * convert into the hardware representation. + * + * See ixgbe_ptp_read_X550() for more details. + */ + ts = ns_to_timespec64(clock_edge); + trgttiml = (u32)ts.tv_nsec; + trgttimh = (u32)ts.tv_sec; + + IXGBE_WRITE_REG(hw, IXGBE_FREQOUT0, freqout); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); + + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_REG(hw, IXGBE_TSSDP, tssdp); + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); + + IXGBE_WRITE_FLUSH(hw); +} + +/** * ixgbe_ptp_read_X550 - read cycle counter value - * @hw_cc: cyclecounter structure + * @cc: cyclecounter structure * * This function reads SYSTIME registers. It is called by the cyclecounter * structure to convert from internal representation into nanoseconds. We need @@ -221,10 +323,10 @@ static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter) * result of SYSTIME is 32bits of "billions of cycles" and 32 bits of * "cycles", rather than seconds and nanoseconds. */ -static u64 ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc) +static u64 ixgbe_ptp_read_X550(const struct cyclecounter *cc) { struct ixgbe_adapter *adapter = - container_of(hw_cc, struct ixgbe_adapter, hw_cc); + container_of(cc, struct ixgbe_adapter, hw_cc); struct ixgbe_hw *hw = &adapter->hw; struct timespec64 ts; @@ -838,6 +940,15 @@ void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector, ixgbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); } +/** + * ixgbe_ptp_get_ts_config - get current hardware timestamping configuration + * @adapter: pointer to adapter structure + * @ifr: ioctl data + * + * This function returns the current timestamping settings. Rather than + * attempt to deconstruct registers to fill in the values, simply keep a copy + * of the old settings around, and return a copy when requested. 
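+ * (This is the path behind the SIOCGHWTSTAMP ioctl; the saved copy is
+ * refreshed whenever a SIOCSHWTSTAMP request is accepted, so no register
+ * decoding is needed here.)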
+ */ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) { struct hwtstamp_config *config = &adapter->tstamp_config; @@ -1253,7 +1364,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; - adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_x540; + adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_X540; break; case ixgbe_mac_82599EB: snprintf(adapter->ptp_caps.name, @@ -1280,13 +1391,13 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.n_alarm = 0; adapter->ptp_caps.n_ext_ts = 0; adapter->ptp_caps.n_per_out = 0; - adapter->ptp_caps.pps = 0; + adapter->ptp_caps.pps = 1; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; - adapter->ptp_setup_sdp = NULL; + adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_X550; break; default: adapter->ptp_clock = NULL; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 345701af7749..537dfff585e0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1645,7 +1645,7 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), (IXGBE_ETQF_FILTER_EN | IXGBE_ETQF_TX_ANTISPOOF | - IXGBE_ETH_P_LLDP)); + ETH_P_LLDP)); IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), (IXGBE_ETQF_FILTER_EN | diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 84f2dba39e36..2be1c4c72435 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1067,6 +1067,7 @@ struct ixgbe_nvm_version { #define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ #define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ #define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */ +#define IXGBE_TSSDP 0x0003C /* TimeSync SDP Configuration Register - RW */ /* Diagnostic Registers */ #define IXGBE_RDSTATCTL 0x02C20 @@ -2240,11 +2241,18 @@ enum { #define IXGBE_RXDCTL_RLPML_EN 0x00008000 #define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ -#define IXGBE_TSAUXC_EN_CLK 0x00000004 -#define IXGBE_TSAUXC_SYNCLK 0x00000008 -#define IXGBE_TSAUXC_SDP0_INT 0x00000040 +#define IXGBE_TSAUXC_EN_CLK 0x00000004 +#define IXGBE_TSAUXC_SYNCLK 0x00000008 +#define IXGBE_TSAUXC_SDP0_INT 0x00000040 +#define IXGBE_TSAUXC_EN_TT0 0x00000001 +#define IXGBE_TSAUXC_EN_TT1 0x00000002 +#define IXGBE_TSAUXC_ST0 0x00000010 #define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000 +#define IXGBE_TSSDP_TS_SDP0_SEL_MASK 0x000000C0 +#define IXGBE_TSSDP_TS_SDP0_CLK0 0x00000080 +#define IXGBE_TSSDP_TS_SDP0_EN 0x00000100 + #define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ #define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index bfe95ce0bd7f..6b609553329f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -14,57 +14,10 @@ struct xdp_umem *ixgbe_xsk_umem(struct 
ixgbe_adapter *adapter, bool xdp_on = READ_ONCE(adapter->xdp_prog); int qid = ring->ring_idx; - if (!adapter->xsk_umems || !adapter->xsk_umems[qid] || - qid >= adapter->num_xsk_umems || !xdp_on) + if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps)) return NULL; - return adapter->xsk_umems[qid]; -} - -static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter) -{ - if (adapter->xsk_umems) - return 0; - - adapter->num_xsk_umems_used = 0; - adapter->num_xsk_umems = adapter->num_rx_queues; - adapter->xsk_umems = kcalloc(adapter->num_xsk_umems, - sizeof(*adapter->xsk_umems), - GFP_KERNEL); - if (!adapter->xsk_umems) { - adapter->num_xsk_umems = 0; - return -ENOMEM; - } - - return 0; -} - -static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter, - struct xdp_umem *umem, - u16 qid) -{ - int err; - - err = ixgbe_alloc_xsk_umems(adapter); - if (err) - return err; - - adapter->xsk_umems[qid] = umem; - adapter->num_xsk_umems_used++; - - return 0; -} - -static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid) -{ - adapter->xsk_umems[qid] = NULL; - adapter->num_xsk_umems_used--; - - if (adapter->num_xsk_umems == 0) { - kfree(adapter->xsk_umems); - adapter->xsk_umems = NULL; - adapter->num_xsk_umems = 0; - } + return xdp_get_umem_from_qid(adapter->netdev, qid); } static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter, @@ -113,6 +66,7 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, struct xdp_umem *umem, u16 qid) { + struct net_device *netdev = adapter->netdev; struct xdp_umem_fq_reuse *reuseq; bool if_running; int err; @@ -120,12 +74,9 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, if (qid >= adapter->num_rx_queues) return -EINVAL; - if (adapter->xsk_umems) { - if (qid >= adapter->num_xsk_umems) - return -EINVAL; - if (adapter->xsk_umems[qid]) - return -EBUSY; - } + if (qid >= netdev->real_num_rx_queues || + qid >= netdev->real_num_tx_queues) + return -EINVAL; reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count); if (!reuseq) @@ -138,14 +89,12 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, return err; if_running = netif_running(adapter->netdev) && - READ_ONCE(adapter->xdp_prog); + ixgbe_enabled_xdp_adapter(adapter); if (if_running) ixgbe_txrx_ring_disable(adapter, qid); - err = ixgbe_add_xsk_umem(adapter, umem, qid); - if (err) - return err; + set_bit(qid, adapter->af_xdp_zc_qps); if (if_running) { ixgbe_txrx_ring_enable(adapter, qid); @@ -161,20 +110,21 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) { + struct xdp_umem *umem; bool if_running; - if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems || - !adapter->xsk_umems[qid]) + umem = xdp_get_umem_from_qid(adapter->netdev, qid); + if (!umem) return -EINVAL; if_running = netif_running(adapter->netdev) && - READ_ONCE(adapter->xdp_prog); + ixgbe_enabled_xdp_adapter(adapter); if (if_running) ixgbe_txrx_ring_disable(adapter, qid); - ixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]); - ixgbe_remove_xsk_umem(adapter, qid); + clear_bit(qid, adapter->af_xdp_zc_qps); + ixgbe_xsk_umem_dma_unmap(adapter, umem); if (if_running) ixgbe_txrx_ring_enable(adapter, qid); @@ -621,8 +571,9 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) union ixgbe_adv_tx_desc *tx_desc = NULL; struct ixgbe_tx_buffer *tx_bi; bool work_done = true; - u32 len, cmd_type; + struct xdp_desc desc; dma_addr_t dma; + u32 cmd_type; while (budget-- > 0) { if 
(unlikely(!ixgbe_desc_unused(xdp_ring)) || @@ -631,15 +582,18 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) break; } - if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len)) + if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc)) break; - dma_sync_single_for_device(xdp_ring->dev, dma, len, + dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr); + + dma_sync_single_for_device(xdp_ring->dev, dma, desc.len, DMA_BIDIRECTIONAL); tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use]; - tx_bi->bytecount = len; + tx_bi->bytecount = desc.len; tx_bi->xdpf = NULL; + tx_bi->gso_segs = 1; tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use); tx_desc->read.buffer_addr = cpu_to_le64(dma); @@ -648,10 +602,10 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DCMD_IFCS; - cmd_type |= len | IXGBE_TXD_CMD; + cmd_type |= desc.len | IXGBE_TXD_CMD; tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); tx_desc->read.olinfo_status = - cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT); + cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT); xdp_ring->next_to_use++; if (xdp_ring->next_to_use == xdp_ring->count) @@ -704,7 +658,6 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, xsk_frames++; tx_bi->xdpf = NULL; - total_bytes += tx_bi->bytecount; tx_bi++; tx_desc++; @@ -753,7 +706,7 @@ int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid) if (qid >= adapter->num_xdp_queues) return -ENXIO; - if (!adapter->xsk_umems || !adapter->xsk_umems[qid]) + if (!adapter->xdp_ring[qid]->xsk_umem) return -ENXIO; ring = adapter->xdp_ring[qid]; diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 5399787e07af..54459b69c948 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -85,22 +85,16 @@ static int ixgbevf_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - u32 link_speed = 0; - bool link_up; ethtool_link_ksettings_zero_link_mode(cmd, supported); ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); cmd->base.autoneg = AUTONEG_DISABLE; cmd->base.port = -1; - hw->mac.get_link_status = 1; - hw->mac.ops.check_link(hw, &link_speed, &link_up, false); - - if (link_up) { + if (adapter->link_up) { __u32 speed = SPEED_10000; - switch (link_speed) { + switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: speed = SPEED_10000; break; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index d189ed247665..d2b41f9f87f8 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1423,6 +1423,9 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, */ /* what was last interrupt timeslice? 
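	 * (q_vector->itr stores the interval in register units where
	 * shifting right by two yields microseconds; the zero check added
	 * below avoids a divide-by-zero in the bytes/usec computation when
	 * that rounds down to 0.)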
 */
	timepassed_us = q_vector->itr >> 2;
+	if (timepassed_us == 0)
+		return;
+
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index cd3b81300cc7..d5ce49636548 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -508,9 +508,8 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
 	}

-	ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE);
-
-	return 0;
+	return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+					  IXGBE_VFMAILBOX_SIZE);
 }

 /**
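An aside on the igc_reset() flow-control hunk earlier in this patch: the watermark arithmetic is easy to sanity-check in isolation. Below is a minimal user-space sketch that reuses the two constants the patch defines; the 1522-byte max frame size is an assumption for illustration (a conventional VLAN-tagged Ethernet frame), not a value taken from the driver.

	#include <stdio.h>
	#include <stdint.h>

	#define IGC_PBA_34K          0x0022  /* packet buffer size, in KB units */
	#define MAX_JUMBO_FRAME_SIZE 0x2600  /* 9728 bytes, per the patch */

	int main(void)
	{
		uint32_t max_frame_size = 1522; /* assumed VLAN-tagged max frame */
		uint32_t pba = IGC_PBA_34K;
		uint32_t hwm, high_water, low_water;

		/* pba << 10 converts KB to bytes: 34 KB -> 34816 bytes */
		hwm = (pba << 10) - (max_frame_size + MAX_JUMBO_FRAME_SIZE);

		high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
		low_water = high_water - 16;

		printf("hwm=%u high=%u low=%u\n", hwm, high_water, low_water);
		return 0;
	}

This prints hwm=23566 high=23552 low=23536: the high watermark leaves room for one in-flight Tx frame plus one maximum-size Rx frame below the 34 KB packet buffer, exactly as the comment in that hunk describes.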