Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 25
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 38
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000.h | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 57
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_param.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 17
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 18
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 68
-rw-r--r--  drivers/net/ethernet/intel/e1000e/param.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 3
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k.h | 11
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 18
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 6
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 166
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 49
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 496
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.h | 203
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 101
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_devids.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 311
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_osdep.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_register.h | 4658
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 23
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 82
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 234
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 61
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.h | 3
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 1
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 68
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_type.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 23
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 214
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 1432
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 36
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c | 37
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c | 121
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 745
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 135
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h | 39
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.c | 773
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 314
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 51
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 1139
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 191
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.h | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 135
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 58
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 39
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 73
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 61
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 18
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 13
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 50
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igbvf/ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igbvf/igbvf.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 46
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 8
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 39
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 112
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_hw.h | 21
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_i225.c | 156
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_i225.h | 3
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_mac.c | 28
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 68
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 256
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_regs.h | 30
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 283
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 331
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 67
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 6
125 files changed, 6201 insertions, 7999 deletions
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 48a8f9aa1dd0..5aa86318ed3e 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -34,7 +34,7 @@ config E100
to identify the adapter.
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/intel/e100.rst>.
+ <file:Documentation/networking/device_drivers/ethernet/intel/e100.rst>.
To compile this driver as a module, choose M here. The module
will be called e100.
@@ -50,7 +50,7 @@ config E1000
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/intel/e1000.rst>.
+ <file:Documentation/networking/device_drivers/ethernet/intel/e1000.rst>.
To compile this driver as a module, choose M here. The module
will be called e1000.
@@ -70,7 +70,7 @@ config E1000E
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/intel/e1000e.rst>.
+ <file:Documentation/networking/device_drivers/ethernet/intel/e1000e.rst>.
To compile this driver as a module, choose M here. The module
will be called e1000e.
@@ -98,7 +98,7 @@ config IGB
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/intel/igb.rst>.
+ <file:Documentation/networking/device_drivers/ethernet/intel/igb.rst>.
To compile this driver as a module, choose M here. The module
will be called igb.
@@ -134,7 +134,7 @@ config IGBVF
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/intel/igbvf.rst>.
+ <file:Documentation/networking/device_drivers/ethernet/intel/igbvf.rst>.
To compile this driver as a module, choose M here. The module
will be called igbvf.
@@ -151,7 +151,7 @@ config IXGB
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/intel/ixgb.rst>.
+ <file:Documentation/networking/device_drivers/ethernet/intel/ixgb.rst>.
To compile this driver as a module, choose M here. The module
will be called ixgb.
@@ -170,7 +170,7 @@ config IXGBE
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/intel/ixgbe.rst>.
+ <file:Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst>.
To compile this driver as a module, choose M here. The module
will be called ixgbe.
@@ -222,7 +222,7 @@ config IXGBEVF
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/intel/ixgbevf.rst>.
+ <file:Documentation/networking/device_drivers/ethernet/intel/ixgbevf.rst>.
To compile this driver as a module, choose M here. The module
will be called ixgbevf. MSI-X interrupt support is required
@@ -249,7 +249,7 @@ config I40E
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/intel/i40e.rst>.
+ <file:Documentation/networking/device_drivers/ethernet/intel/i40e.rst>.
To compile this driver as a module, choose M here. The module
will be called i40e.
@@ -284,7 +284,7 @@ config I40EVF
This driver was formerly named i40evf.
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/intel/iavf.rst>.
+ <file:Documentation/networking/device_drivers/ethernet/intel/iavf.rst>.
To compile this driver as a module, choose M here. The module
will be called iavf. MSI-X interrupt support is required
@@ -295,6 +295,7 @@ config ICE
default n
depends on PCI_MSI
select NET_DEVLINK
+ select PLDMFW
help
This driver supports Intel(R) Ethernet Connection E800 Series of
devices. For more information on how to identify your adapter, go
@@ -303,7 +304,7 @@ config ICE
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/intel/ice.rst>.
+ <file:Documentation/networking/device_drivers/ethernet/intel/ice.rst>.
To compile this driver as a module, choose M here. The module
will be called ice.
@@ -321,7 +322,7 @@ config FM10K
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/intel/fm10k.rst>.
+ <file:Documentation/networking/device_drivers/ethernet/intel/fm10k.rst>.
To compile this driver as a module, choose M here. The module
will be called fm10k. MSI-X interrupt support is required
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 1b8d015ebfb0..36da059388dc 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -150,8 +150,6 @@
#define DRV_NAME "e100"
-#define DRV_EXT "-NAPI"
-#define DRV_VERSION "3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
@@ -165,7 +163,6 @@
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);
@@ -2430,7 +2427,6 @@ static void e100_get_drvinfo(struct net_device *netdev,
{
struct nic *nic = netdev_priv(netdev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(nic->pdev),
sizeof(info->bus_info));
}
@@ -2997,8 +2993,6 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
e100_down(nic);
netif_device_detach(netdev);
- pci_save_state(pdev);
-
if ((nic->flags & wol_magic) | e100_asf(nic)) {
/* enable reverse auto-negotiation */
if (nic->phy == phy_82552_v) {
@@ -3028,24 +3022,22 @@ static int __e100_power_off(struct pci_dev *pdev, bool wake)
return 0;
}
-#ifdef CONFIG_PM
-static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused e100_suspend(struct device *dev_d)
{
bool wake;
- __e100_shutdown(pdev, &wake);
- return __e100_power_off(pdev, wake);
+
+ __e100_shutdown(to_pci_dev(dev_d), &wake);
+
+ device_wakeup_disable(dev_d);
+
+ return 0;
}
-static int e100_resume(struct pci_dev *pdev)
+static int __maybe_unused e100_resume(struct device *dev_d)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev_d);
struct nic *nic = netdev_priv(netdev);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- /* ack any pending wake events, disable PME */
- pci_enable_wake(pdev, PCI_D0, 0);
-
/* disable reverse auto-negotiation */
if (nic->phy == phy_82552_v) {
u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
@@ -3062,7 +3054,6 @@ static int e100_resume(struct pci_dev *pdev)
return 0;
}
-#endif /* CONFIG_PM */
static void e100_shutdown(struct pci_dev *pdev)
{
@@ -3150,16 +3141,17 @@ static const struct pci_error_handlers e100_err_handler = {
.resume = e100_io_resume,
};
+static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume);
+
static struct pci_driver e100_driver = {
.name = DRV_NAME,
.id_table = e100_id_table,
.probe = e100_probe,
.remove = e100_remove,
-#ifdef CONFIG_PM
+
/* Power Management hooks */
- .suspend = e100_suspend,
- .resume = e100_resume,
-#endif
+ .driver.pm = &e100_pm_ops,
+
.shutdown = e100_shutdown,
.err_handler = &e100_err_handler,
};
@@ -3167,7 +3159,7 @@ static struct pci_driver e100_driver = {
static int __init e100_init_module(void)
{
if (((1 << debug) - 1) & NETIF_MSG_DRV) {
- pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ pr_info("%s\n", DRV_DESCRIPTION);
pr_info("%s\n", DRV_COPYRIGHT);
}
return pci_register_driver(&e100_driver);
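Note: the e100 hunks above are one instance of the power-management conversion applied across this series. The legacy pci_driver .suspend/.resume hooks and their CONFIG_PM guards give way to a dev_pm_ops table built with SIMPLE_DEV_PM_OPS(), the callbacks take a struct device and are marked __maybe_unused so they compile out cleanly when CONFIG_PM_SLEEP is disabled, and the explicit pci_save_state()/pci_set_power_state() calls drop out because the PCI core now performs them. A minimal sketch of the resulting shape, using placeholder "foo" names rather than code from this patch:

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);

	/* Only quiesce the device here; the PCI/PM core now saves state,
	 * handles the D-state transition and configures PME, so none of
	 * those calls remain in the driver.
	 */
	netif_device_detach(netdev);
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);

	/* Re-initialize hardware as needed, then reattach the netdev. */
	netif_device_attach(netdev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name      = "foo",
	.driver.pm = &foo_pm_ops,	/* replaces the legacy .suspend/.resume hooks */
};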
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 7fad2f24dcad..4817eb13ca6f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -330,7 +330,6 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
dev_err(&adapter->pdev->dev, format, ## arg)
extern char e1000_driver_name[];
-extern const char e1000_driver_version[];
int e1000_open(struct net_device *netdev);
int e1000_close(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 6f45df5690d4..f976e9daa3d8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -533,8 +533,6 @@ static void e1000_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, e1000_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, e1000_driver_version,
- sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
@@ -1358,8 +1356,8 @@ static void e1000_create_lbtest_frame(struct sk_buff *skb,
memset(skb->data, 0xFF, frame_size);
frame_size &= ~1;
memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
- memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
- memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+ skb->data[frame_size / 2 + 10] = 0xBE;
+ skb->data[frame_size / 2 + 12] = 0xAF;
}
static int e1000_check_lbtest_frame(const unsigned char *data,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 623e516a9630..4e7a0810eaeb 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -4526,7 +4526,7 @@ s32 e1000_setup_led(struct e1000_hw *hw)
~IGP01E1000_GMII_SPD));
if (ret_val)
return ret_val;
- /* Fall Through */
+ fallthrough;
default:
if (hw->media_type == e1000_media_type_fiber) {
ledctl = er32(LEDCTL);
@@ -4571,7 +4571,7 @@ s32 e1000_cleanup_led(struct e1000_hw *hw)
hw->phy_spd_default);
if (ret_val)
return ret_val;
- /* Fall Through */
+ fallthrough;
default:
/* Restore LEDCTL settings */
ew32(LEDCTL, hw->ledctl_default);
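Note: the two hunks above also show the switch-case annotation change that repeats throughout the rest of this diff. The free-form /* Fall Through */ comments become the fallthrough pseudo-keyword from <linux/compiler_attributes.h>, which expands to the compiler's fallthrough attribute where available, so -Wimplicit-fallthrough can reliably tell intentional fall-through from a missing break. A minimal sketch with made-up helper names, not code from the patch:

#include <linux/compiler_attributes.h>

/* Illustrative only: the configure_*() helpers are placeholders. */
static void example_configure_speed(int speed)
{
	switch (speed) {
	case 1000:
		configure_gigabit();
		fallthrough;	/* the 100 Mb/s settings also apply at 1000 Mb/s */
	case 100:
		configure_fast_ethernet();
		break;
	default:
		break;
	}
}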
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index d9fa4600f745..1e6ec081fd9d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -10,8 +10,6 @@
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k8-NAPI"
-const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
/* e1000_pci_tbl - PCI Device ID Table
@@ -151,10 +149,8 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
-#ifdef CONFIG_PM
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
-static int e1000_resume(struct pci_dev *pdev);
-#endif
+static int __maybe_unused e1000_suspend(struct device *dev);
+static int __maybe_unused e1000_resume(struct device *dev);
static void e1000_shutdown(struct pci_dev *pdev);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -179,16 +175,16 @@ static const struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
+static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
+
static struct pci_driver e1000_driver = {
.name = e1000_driver_name,
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = e1000_remove,
-#ifdef CONFIG_PM
- /* Power Management Hooks */
- .suspend = e1000_suspend,
- .resume = e1000_resume,
-#endif
+ .driver = {
+ .pm = &e1000_pm_ops,
+ },
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
};
@@ -196,7 +192,6 @@ static struct pci_driver e1000_driver = {
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
@@ -223,7 +218,7 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
static int __init e1000_init_module(void)
{
int ret;
- pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
+ pr_info("%s\n", e1000_driver_string);
pr_info("%s\n", e1000_copyright);
@@ -1143,7 +1138,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
break;
}
- /* Fall Through */
+ fallthrough;
default:
e1000_read_eeprom(hw,
EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
@@ -3159,7 +3154,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if ((unsigned long)(skb_tail_pointer(skb) - 1)
& 4)
break;
- /* fall through */
pull_size = min((unsigned int)4, skb->data_len);
if (!__pskb_pull_tail(skb, pull_size)) {
e_err(drv, "__pskb_pull_tail "
@@ -5060,9 +5054,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status;
u32 wufc = adapter->wol;
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
netif_device_detach(netdev);
@@ -5076,12 +5067,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
e1000_down(adapter);
}
-#ifdef CONFIG_PM
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-#endif
-
status = er32(STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -5142,37 +5127,26 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
return 0;
}
-#ifdef CONFIG_PM
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused e1000_suspend(struct device *dev)
{
int retval;
+ struct pci_dev *pdev = to_pci_dev(dev);
bool wake;
retval = __e1000_shutdown(pdev, &wake);
- if (retval)
- return retval;
-
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
+ device_set_wakeup_enable(dev, wake);
- return 0;
+ return retval;
}
-static int e1000_resume(struct pci_dev *pdev)
+static int __maybe_unused e1000_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_save_state(pdev);
-
if (adapter->need_ioport)
err = pci_enable_device(pdev);
else
@@ -5209,7 +5183,6 @@ static int e1000_resume(struct pci_dev *pdev)
return 0;
}
-#endif
static void e1000_shutdown(struct pci_dev *pdev)
{
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index d3f29ffe1e47..4d4f5bf1e516 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -708,7 +708,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
goto full_duplex_only;
case SPEED_1000 + HALF_DUPLEX:
e_dev_info("Half Duplex is not supported at 1000 Mbps\n");
- /* fall through */
+ fallthrough;
case SPEED_1000 + FULL_DUPLEX:
full_duplex_only:
e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex "
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 2c1bab377b2a..88faf05e23ba 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -154,7 +154,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
ew32(EECD, eecd);
break;
}
- /* Fall Through */
+ fallthrough;
default:
nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
@@ -1107,7 +1107,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
switch (mac->type) {
case e1000_82573:
e1000e_enable_tx_pkt_filtering(hw);
- /* fall through */
+ fallthrough;
case e1000_82574:
case e1000_82583:
reg_data = er32(GCR);
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 944abd5eae11..ba7a0f8f6937 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -460,7 +460,6 @@ enum latency_range {
};
extern char e1000e_driver_name[];
-extern const char e1000e_driver_version[];
void e1000e_check_options(struct e1000_adapter *adapter);
void e1000e_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 1d47e2503072..a8fc9208382c 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -633,8 +633,6 @@ static void e1000_get_drvinfo(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, e1000e_driver_version,
- sizeof(drvinfo->version));
/* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
@@ -895,7 +893,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
- /* fall through */
case e1000_pch_tgp:
case e1000_pch_adp:
mask |= BIT(18);
@@ -1571,7 +1568,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
/* set bit 29 (value of MULR requests is now 0) */
tarc0 &= 0xcfffffff;
ew32(TARC(0), tarc0);
- /* fall through */
+ fallthrough;
case e1000_80003es2lan:
if (hw->phy.media_type == e1000_media_type_fiber ||
hw->phy.media_type == e1000_media_type_internal_serdes) {
@@ -1579,7 +1576,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
ew32(CTRL_EXT, adapter->tx_fifo_head);
adapter->tx_fifo_head = 0;
}
- /* fall through */
+ fallthrough;
case e1000_82571:
case e1000_82572:
if (hw->phy.media_type == e1000_media_type_fiber ||
@@ -1589,7 +1586,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
usleep_range(10000, 11000);
break;
}
- /* Fall Through */
+ fallthrough;
default:
hw->mac.autoneg = 1;
if (hw->phy.type == e1000_phy_gg82563)
@@ -1611,8 +1608,8 @@ static void e1000_create_lbtest_frame(struct sk_buff *skb,
memset(skb->data, 0xFF, frame_size);
frame_size &= ~1;
memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
- memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
- memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+ skb->data[frame_size / 2 + 10] = 0xBE;
+ skb->data[frame_size / 2 + 12] = 0xAF;
}
static int e1000_check_lbtest_frame(struct sk_buff *skb,
@@ -2124,7 +2121,7 @@ static int e1000_get_rxnfc(struct net_device *netdev,
case TCP_V4_FLOW:
if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
+ fallthrough;
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
@@ -2135,7 +2132,7 @@ static int e1000_get_rxnfc(struct net_device *netdev,
case TCP_V6_FLOW:
if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
+ fallthrough;
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f999cca37a8a..b2f2fcfdf732 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -301,10 +301,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
ret_val = e1000_disable_ulp_lpt_lp(hw, true);
- if (ret_val) {
+ if (ret_val)
e_warn("Failed to disable ULP\n");
- goto out;
- }
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
@@ -338,12 +336,12 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
msleep(50);
- /* fall-through */
+ fallthrough;
case e1000_pch2lan:
if (e1000_phy_is_accessible_pchlan(hw))
break;
- /* fall-through */
+ fallthrough;
case e1000_pchlan:
if ((hw->mac.type == e1000_pchlan) &&
(fwsm & E1000_ICH_FWSM_FW_VALID))
@@ -459,7 +457,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
return ret_val;
if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
break;
- /* fall-through */
+ fallthrough;
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
@@ -704,7 +702,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch2lan:
mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
mac->ops.rar_set = e1000_rar_set_pch2lan;
- /* fall-through */
+ fallthrough;
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
@@ -1559,7 +1557,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
ret_val = e1000_k1_workaround_lv(hw);
if (ret_val)
return ret_val;
- /* fall-thru */
+ fallthrough;
case e1000_pchlan:
if (hw->phy.type == e1000_phy_82578) {
ret_val = e1000_link_stall_workaround_hv(hw);
@@ -2096,7 +2094,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
break;
}
- /* Fall-thru */
+ fallthrough;
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
@@ -3189,7 +3187,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
return 0;
}
e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
- /* fall-thru */
+ fallthrough;
default:
/* set bank to 0 in case flash read fails */
*bank = 0;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a279f4fa9962..63dde3bcf5bc 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -28,11 +28,7 @@
#include "e1000.h"
-#define DRV_EXTRAVERSION "-k"
-
-#define DRV_VERSION "3.2.6" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
-const char e1000e_driver_version[] = DRV_VERSION;
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
@@ -2111,7 +2107,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
e1000e_reset_interrupt_capability(adapter);
}
adapter->int_mode = E1000E_INT_MODE_MSI;
- /* Fall through */
+ fallthrough;
case E1000E_INT_MODE_MSI:
if (!pci_enable_msi(adapter->pdev)) {
adapter->flags |= FLAG_MSI_ENABLED;
@@ -2119,7 +2115,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
adapter->int_mode = E1000E_INT_MODE_LEGACY;
e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
}
- /* Fall through */
+ fallthrough;
case E1000E_INT_MODE_LEGACY:
/* Don't do anything; this is the system default */
break;
@@ -3177,10 +3173,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
switch (adapter->rx_ps_pages) {
case 3:
psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
- /* fall-through */
+ fallthrough;
case 2:
psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
- /* fall-through */
+ fallthrough;
case 1:
psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
break;
@@ -3677,9 +3673,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
is_l2 = true;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- /* Hardware cannot filter just V2 L4 Sync messages;
- * fall-through to V2 (both L2 and L4) Sync.
- */
+ /* Hardware cannot filter just V2 L4 Sync messages */
+ fallthrough;
case HWTSTAMP_FILTER_PTP_V2_SYNC:
/* Also time stamps V2 Path Delay Request/Response. */
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
@@ -3688,9 +3683,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- /* Hardware cannot filter just V2 L4 Delay Request messages;
- * fall-through to V2 (both L2 and L4) Delay Request.
- */
+ /* Hardware cannot filter just V2 L4 Delay Request messages */
+ fallthrough;
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
/* Also time stamps V2 Path Delay Request/Response. */
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
@@ -3700,9 +3694,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- /* Hardware cannot filter just V2 L4 or L2 Event messages;
- * fall-through to all V2 (both L2 and L4) Events.
- */
+ /* Hardware cannot filter just V2 L4 or L2 Event messages */
+ fallthrough;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
@@ -3714,6 +3707,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
* Delay Request messages but not both so fall-through to
* time stamp all packets.
*/
+ fallthrough;
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
is_l2 = true;
@@ -4060,7 +4054,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
fc->low_water = fc->high_water - 8;
break;
}
- /* fall-through */
+ fallthrough;
default:
hwm = min(((pba << 10) * 9 / 10),
((pba << 10) - adapter->max_frame_size));
@@ -4085,7 +4079,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
- /* fall-through */
+ fallthrough;
case e1000_pch_tgp:
case e1000_pch_adp:
fc->refresh_time = 0xFFFF;
@@ -6349,7 +6343,6 @@ fl_out:
pm_runtime_put_sync(netdev->dev.parent);
}
-#ifdef CONFIG_PM_SLEEP
/* S0ix implementation */
static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
{
@@ -6571,7 +6564,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS;
ew32(CTRL_EXT, mac_data);
}
-#endif /* CONFIG_PM_SLEEP */
static int e1000e_pm_freeze(struct device *dev)
{
@@ -6611,11 +6603,17 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 ctrl, ctrl_ext, rctl, status;
- /* Runtime suspend should only enable wakeup for link changes */
- u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+ u32 ctrl, ctrl_ext, rctl, status, wufc;
int retval = 0;
+ /* Runtime suspend should only enable wakeup for link changes */
+ if (runtime)
+ wufc = E1000_WUFC_LNKC;
+ else if (device_may_wakeup(&pdev->dev))
+ wufc = adapter->wol;
+ else
+ wufc = 0;
+
status = er32(STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -6672,7 +6670,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
} else if (hw->mac.type >= e1000_pch_lpt) {
- if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
/* ULP does not support wake from unicast, multicast
* or broadcast.
*/
@@ -6764,7 +6762,7 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked)
case PCIE_LINK_STATE_L0S:
case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
- /* fall-through - can't have L1 without L0s */
+ fallthrough; /* can't have L1 without L0s */
case PCIE_LINK_STATE_L1:
aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
break;
@@ -6869,7 +6867,6 @@ err_irq:
return rc;
}
-#ifdef CONFIG_PM
static int __e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -6935,8 +6932,7 @@ static int __e1000_resume(struct pci_dev *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int e1000e_pm_suspend(struct device *dev)
+static __maybe_unused int e1000e_pm_suspend(struct device *dev)
{
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6960,7 +6956,7 @@ static int e1000e_pm_suspend(struct device *dev)
return rc;
}
-static int e1000e_pm_resume(struct device *dev)
+static __maybe_unused int e1000e_pm_resume(struct device *dev)
{
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6979,9 +6975,8 @@ static int e1000e_pm_resume(struct device *dev)
return e1000e_pm_thaw(dev);
}
-#endif /* CONFIG_PM_SLEEP */
-static int e1000e_pm_runtime_idle(struct device *dev)
+static __maybe_unused int e1000e_pm_runtime_idle(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6997,7 +6992,7 @@ static int e1000e_pm_runtime_idle(struct device *dev)
return -EBUSY;
}
-static int e1000e_pm_runtime_resume(struct device *dev)
+static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7014,7 +7009,7 @@ static int e1000e_pm_runtime_resume(struct device *dev)
return rc;
}
-static int e1000e_pm_runtime_suspend(struct device *dev)
+static __maybe_unused int e1000e_pm_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7039,7 +7034,6 @@ static int e1000e_pm_runtime_suspend(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
static void e1000_shutdown(struct pci_dev *pdev)
{
@@ -7899,8 +7893,7 @@ static struct pci_driver e1000_driver = {
**/
static int __init e1000_init_module(void)
{
- pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
- e1000e_driver_version);
+ pr_info("Intel(R) PRO/1000 Network Driver\n");
pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");
return pci_register_driver(&e1000_driver);
@@ -7922,6 +7915,5 @@ module_exit(e1000_exit_module);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
/* netdev.c */
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 098369fd3e65..ebe121db4307 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -375,7 +375,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
"%s Invalid mode - setting default\n",
opt.name);
adapter->itr_setting = opt.def;
- /* fall-through */
+ fallthrough;
case 3:
dev_info(&adapter->pdev->dev,
"%s set to dynamic conservative mode\n",
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 42233019255a..e11c877595fb 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -607,7 +607,7 @@ static s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
break;
case e1000_ms_auto:
phy_data &= ~CTL1000_ENABLE_MASTER;
- /* fall-through */
+ fallthrough;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 439fda2f5368..34b988d70488 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -295,7 +295,6 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
- /* fall-through */
case e1000_pch_tgp:
case e1000_pch_adp:
if ((hw->mac.type < e1000_pch_lpt) ||
@@ -303,7 +302,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
adapter->ptp_clock_info.max_adj = 24000000 - 1;
break;
}
- /* fall-through */
+ fallthrough;
case e1000_82574:
case e1000_82583:
adapter->ptp_clock_info.max_adj = 600000000 - 1;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 5b78362b82ac..6119a4108838 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -221,12 +221,6 @@ struct fm10k_iov_data {
struct fm10k_vf_info vf_info[];
};
-struct fm10k_udp_port {
- struct list_head list;
- sa_family_t sa_family;
- __be16 port;
-};
-
enum fm10k_macvlan_request_type {
FM10K_UC_MAC_REQUEST,
FM10K_MC_MAC_REQUEST,
@@ -370,8 +364,8 @@ struct fm10k_intfc {
u32 rssrk[FM10K_RSSRK_SIZE];
/* UDP encapsulation port tracking information */
- struct list_head vxlan_port;
- struct list_head geneve_port;
+ __be16 vxlan_port;
+ __be16 geneve_port;
/* MAC/VLAN update queue */
struct list_head macvlan_requests;
@@ -476,7 +470,6 @@ struct fm10k_cb {
/* main */
extern char fm10k_driver_name[];
-extern const char fm10k_driver_version[];
int fm10k_init_queueing_scheme(struct fm10k_intfc *interface);
void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface);
__be16 fm10k_tx_encap_offload(struct sk_buff *skb);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 37fbc646deb9..908fefaa6b85 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -449,8 +449,6 @@ static void fm10k_get_drvinfo(struct net_device *dev,
strncpy(info->driver, fm10k_driver_name,
sizeof(info->driver) - 1);
- strncpy(info->version, fm10k_driver_version,
- sizeof(info->version) - 1);
strncpy(info->bus_info, pci_name(interface->pdev),
sizeof(info->bus_info) - 1);
}
@@ -694,12 +692,12 @@ static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
case TCP_V4_FLOW:
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
+ fallthrough;
case UDP_V4_FLOW:
if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
interface->flags))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
+ fallthrough;
case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
case AH_ESP_V4_FLOW:
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 17738b0a9873..d88dd41a9442 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -11,9 +11,7 @@
#include "fm10k.h"
-#define DRV_VERSION "0.27.1-k"
#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
-const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
@@ -22,7 +20,6 @@ static const char fm10k_copyright[] =
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
/* single workqueue for entire fm10k driver */
struct workqueue_struct *fm10k_workqueue;
@@ -35,7 +32,7 @@ struct workqueue_struct *fm10k_workqueue;
**/
static int __init fm10k_init_module(void)
{
- pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version);
+ pr_info("%s\n", fm10k_driver_string);
pr_info("%s\n", fm10k_copyright);
/* create driver workqueue */
@@ -638,15 +635,8 @@ static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
{
struct fm10k_intfc *interface = netdev_priv(skb->dev);
- struct fm10k_udp_port *vxlan_port;
- /* we can only offload a vxlan if we recognize it as such */
- vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
- struct fm10k_udp_port, list);
-
- if (!vxlan_port)
- return NULL;
- if (vxlan_port->port != udp_hdr(skb)->dest)
+ if (interface->vxlan_port != udp_hdr(skb)->dest)
return NULL;
/* return offset of udp_hdr plus 8 bytes for VXLAN header */
@@ -859,7 +849,7 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
case IPPROTO_GRE:
if (skb->encapsulation)
break;
- /* fall through */
+ fallthrough;
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
@@ -1557,7 +1547,7 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
* important, starting with the "most" number of features turned on at once,
* and ending with the smallest set of features. This way large combinations
* can be allocated if they're turned on, and smaller combinations are the
- * fallthrough conditions.
+ * fall through conditions.
*
**/
static void fm10k_set_num_queues(struct fm10k_intfc *interface)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 75e51f91036c..8e2e92bf3cd4 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -967,7 +967,7 @@ static s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx)
if (tail != mbx->head)
return FM10K_MBX_ERR_TAIL;
- /* fall through */
+ fallthrough;
case FM10K_MSG_DATA:
/* validate that head is moving correctly */
if (!head || (head == FM10K_MSG_HDR_MASK(HEAD)))
@@ -987,7 +987,7 @@ static s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx)
if ((size < FM10K_VFMBX_MSG_MTU) || (size & (size + 1)))
return FM10K_MBX_ERR_SIZE;
- /* fall through */
+ fallthrough;
case FM10K_MSG_ERROR:
if (!head || (head == FM10K_MSG_HDR_MASK(HEAD)))
return FM10K_MBX_ERR_HEAD;
@@ -1570,7 +1570,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
mbx->mbmem_reg = FM10K_MBMEM_VF(id, 0);
break;
}
- /* fall through */
+ fallthrough;
default:
return FM10K_MBX_ERR_NO_MBX;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 0637ccadee79..5c19ff452558 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -367,39 +367,6 @@ static void fm10k_request_glort_range(struct fm10k_intfc *interface)
}
/**
- * fm10k_free_udp_port_info
- * @interface: board private structure
- *
- * This function frees both geneve_port and vxlan_port structures
- **/
-static void fm10k_free_udp_port_info(struct fm10k_intfc *interface)
-{
- struct fm10k_udp_port *port;
-
- /* flush all entries from vxlan list */
- port = list_first_entry_or_null(&interface->vxlan_port,
- struct fm10k_udp_port, list);
- while (port) {
- list_del(&port->list);
- kfree(port);
- port = list_first_entry_or_null(&interface->vxlan_port,
- struct fm10k_udp_port,
- list);
- }
-
- /* flush all entries from geneve list */
- port = list_first_entry_or_null(&interface->geneve_port,
- struct fm10k_udp_port, list);
- while (port) {
- list_del(&port->list);
- kfree(port);
- port = list_first_entry_or_null(&interface->vxlan_port,
- struct fm10k_udp_port,
- list);
- }
-}
-
-/**
* fm10k_restore_udp_port_info
* @interface: board private structure
*
@@ -408,131 +375,52 @@ static void fm10k_free_udp_port_info(struct fm10k_intfc *interface)
static void fm10k_restore_udp_port_info(struct fm10k_intfc *interface)
{
struct fm10k_hw *hw = &interface->hw;
- struct fm10k_udp_port *port;
/* only the PF supports configuring tunnels */
if (hw->mac.type != fm10k_mac_pf)
return;
- port = list_first_entry_or_null(&interface->vxlan_port,
- struct fm10k_udp_port, list);
-
/* restore tunnel configuration register */
fm10k_write_reg(hw, FM10K_TUNNEL_CFG,
- (port ? ntohs(port->port) : 0) |
+ ntohs(interface->vxlan_port) |
(ETH_P_TEB << FM10K_TUNNEL_CFG_NVGRE_SHIFT));
- port = list_first_entry_or_null(&interface->geneve_port,
- struct fm10k_udp_port, list);
-
/* restore Geneve tunnel configuration register */
fm10k_write_reg(hw, FM10K_TUNNEL_CFG_GENEVE,
- (port ? ntohs(port->port) : 0));
-}
-
-static struct fm10k_udp_port *
-fm10k_remove_tunnel_port(struct list_head *ports,
- struct udp_tunnel_info *ti)
-{
- struct fm10k_udp_port *port;
-
- list_for_each_entry(port, ports, list) {
- if ((port->port == ti->port) &&
- (port->sa_family == ti->sa_family)) {
- list_del(&port->list);
- return port;
- }
- }
-
- return NULL;
-}
-
-static void fm10k_insert_tunnel_port(struct list_head *ports,
- struct udp_tunnel_info *ti)
-{
- struct fm10k_udp_port *port;
-
- /* remove existing port entry from the list so that the newest items
- * are always at the tail of the list.
- */
- port = fm10k_remove_tunnel_port(ports, ti);
- if (!port) {
- port = kmalloc(sizeof(*port), GFP_ATOMIC);
- if (!port)
- return;
- port->port = ti->port;
- port->sa_family = ti->sa_family;
- }
-
- list_add_tail(&port->list, ports);
+ ntohs(interface->geneve_port));
}
/**
- * fm10k_udp_tunnel_add
+ * fm10k_udp_tunnel_sync - Called when UDP tunnel ports change
* @dev: network interface device structure
- * @ti: Tunnel endpoint information
+ * @table: Tunnel table (according to tables of @fm10k_udp_tunnels)
*
- * This function is called when a new UDP tunnel port has been added.
+ * This function is called when a new UDP tunnel port is added or deleted.
* Due to hardware restrictions, only one port per type can be offloaded at
- * once.
+ * once. Core will send to the driver a port of its choice.
**/
-static void fm10k_udp_tunnel_add(struct net_device *dev,
- struct udp_tunnel_info *ti)
+static int fm10k_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
struct fm10k_intfc *interface = netdev_priv(dev);
+ struct udp_tunnel_info ti;
- /* only the PF supports configuring tunnels */
- if (interface->hw.mac.type != fm10k_mac_pf)
- return;
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- fm10k_insert_tunnel_port(&interface->vxlan_port, ti);
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- fm10k_insert_tunnel_port(&interface->geneve_port, ti);
- break;
- default:
- return;
- }
+ udp_tunnel_nic_get_port(dev, table, 0, &ti);
+ if (!table)
+ interface->vxlan_port = ti.port;
+ else
+ interface->geneve_port = ti.port;
fm10k_restore_udp_port_info(interface);
+ return 0;
}
-/**
- * fm10k_udp_tunnel_del
- * @dev: network interface device structure
- * @ti: Tunnel end point information
- *
- * This function is called when a new UDP tunnel port is deleted. The freed
- * port will be removed from the list, then we reprogram the offloaded port
- * based on the head of the list.
- **/
-static void fm10k_udp_tunnel_del(struct net_device *dev,
- struct udp_tunnel_info *ti)
-{
- struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_udp_port *port = NULL;
-
- if (interface->hw.mac.type != fm10k_mac_pf)
- return;
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- port = fm10k_remove_tunnel_port(&interface->vxlan_port, ti);
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- port = fm10k_remove_tunnel_port(&interface->geneve_port, ti);
- break;
- default:
- return;
- }
-
- /* if we did remove a port we need to free its memory */
- kfree(port);
-
- fm10k_restore_udp_port_info(interface);
-}
+static const struct udp_tunnel_nic_info fm10k_udp_tunnels = {
+ .sync_table = fm10k_udp_tunnel_sync,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+};
/**
* fm10k_open - Called when a network interface is made active
@@ -580,8 +468,6 @@ int fm10k_open(struct net_device *netdev)
if (err)
goto err_set_queues;
- udp_tunnel_get_rx_info(netdev);
-
fm10k_up(interface);
return 0;
@@ -615,8 +501,6 @@ int fm10k_close(struct net_device *netdev)
fm10k_qv_free_irq(interface);
- fm10k_free_udp_port_info(interface);
-
fm10k_free_all_tx_resources(interface);
fm10k_free_all_rx_resources(interface);
@@ -853,7 +737,7 @@ void fm10k_clear_macvlan_queue(struct fm10k_intfc *interface,
/* Don't free requests for other interfaces */
if (r->mac.glort != glort)
break;
- /* fall through */
+ fallthrough;
case FM10K_VLAN_REQUEST:
if (vlans) {
list_del(&r->list);
@@ -1647,8 +1531,8 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
.ndo_get_vf_stats = fm10k_ndo_get_vf_stats,
- .ndo_udp_tunnel_add = fm10k_udp_tunnel_add,
- .ndo_udp_tunnel_del = fm10k_udp_tunnel_del,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
.ndo_features_check = fm10k_features_check,
@@ -1695,6 +1579,8 @@ struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info)
NETIF_F_SG;
dev->features |= NETIF_F_GSO_UDP_TUNNEL;
+
+ dev->udp_tunnel_nic_info = &fm10k_udp_tunnels;
}
/* all features defined to this point should be changeable */
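Note: the fm10k_netdev.c changes above follow the udp_tunnel_nic offload model. Rather than keeping per-driver port lists and open-coded ndo_udp_tunnel_add/del handlers, the driver publishes a table describing what the hardware can offload, wires the generic udp_tunnel_nic_add_port/udp_tunnel_nic_del_port helpers into its netdev ops, and implements one sync callback that the core invokes with the port it has selected for each table. A condensed sketch of that wiring for a hypothetical driver (everything outside the udp_tunnel_nic_* API shown in the hunks above is a placeholder):

#include <net/udp_tunnel.h>

static int example_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
	struct udp_tunnel_info ti;

	/* Entry 0 holds the single port the core wants offloaded for this
	 * table (0 when nothing should be offloaded); program it here.
	 */
	udp_tunnel_nic_get_port(dev, table, 0, &ti);
	return example_program_tunnel_port(dev, table, ti.port); /* placeholder */
}

static const struct udp_tunnel_nic_info example_udp_tunnels = {
	.sync_table = example_udp_tunnel_sync,
	.tables = {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
};

/* At netdev allocation time:
 *	dev->udp_tunnel_nic_info = &example_udp_tunnels;
 * and in the net_device_ops:
 *	.ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
 *	.ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
 */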
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index d122d0087191..140212bfe08b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2066,10 +2066,6 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
interface->tx_itr = FM10K_TX_ITR_DEFAULT;
interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;
- /* initialize udp port lists */
- INIT_LIST_HEAD(&interface->vxlan_port);
- INIT_LIST_HEAD(&interface->geneve_port);
-
/* Initialize the MAC/VLAN queue */
INIT_LIST_HEAD(&interface->macvlan_requests);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index be07bfdb0bb4..c0780c3624c8 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1317,19 +1317,19 @@ static u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
case FM10K_XCAST_MODE_PROMISC:
if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
return FM10K_XCAST_MODE_PROMISC;
- /* fall through */
+ fallthrough;
case FM10K_XCAST_MODE_ALLMULTI:
if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
return FM10K_XCAST_MODE_ALLMULTI;
- /* fall through */
+ fallthrough;
case FM10K_XCAST_MODE_MULTI:
if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
return FM10K_XCAST_MODE_MULTI;
- /* fall through */
+ fallthrough;
case FM10K_XCAST_MODE_NONE:
if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
return FM10K_XCAST_MODE_NONE;
- /* fall through */
+ fallthrough;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index e95b8da45e07..a7e212d1caa2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -38,7 +38,7 @@
#include <net/xdp_sock.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
-#include "i40e_client.h"
+#include <linux/net/intel/i40e_client.h>
#include <linux/avf/virtchnl.h>
#include "i40e_virtchnl_pf.h"
#include "i40e_txrx.h"
@@ -60,17 +60,14 @@
(((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1)
#define I40E_DEFAULT_QUEUES_PER_VF 4
#define I40E_MAX_VF_QUEUES 16
-#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
#define i40e_pf_get_max_q_per_tc(pf) \
(((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64)
-#define I40E_FDIR_RING 0
#define I40E_FDIR_RING_COUNT 32
#define I40E_MAX_AQ_BUF_SIZE 4096
#define I40E_AQ_LEN 256
#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
#define I40E_MAX_USER_PRIORITY 8
#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
-#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
@@ -92,10 +89,6 @@
#define I40E_OEM_SNAP_SHIFT 16
#define I40E_OEM_RELEASE_MASK 0x0000ffff
-/* The values in here are decimal coded as hex as is the case in the NVM map*/
-#define I40E_CURRENT_NVM_VERSION_HI 0x2
-#define I40E_CURRENT_NVM_VERSION_LO 0x40
-
#define I40E_RX_DESC(R, i) \
(&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
#define I40E_TX_DESC(R, i) \
@@ -105,9 +98,6 @@
#define I40E_TX_FDIRDESC(R, i) \
(&(((struct i40e_filter_program_desc *)((R)->desc))[i]))
-/* default to trying for four seconds */
-#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
-
/* BW rate limiting */
#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
#define I40E_BW_MBPS_DIVISOR 125000 /* rate / (1000000 / 8) Mbps */
@@ -295,9 +285,6 @@ struct i40e_cloud_filter {
u8 tunnel_type;
};
-#define I40E_DCB_PRIO_TYPE_STRICT 0
-#define I40E_DCB_PRIO_TYPE_ETS 1
-#define I40E_DCB_STRICT_PRIO_CREDITS 127
/* DCB per TC information data structure */
struct i40e_tc_info {
u16 qoffset; /* Queue offset from base queue */
@@ -357,15 +344,6 @@ struct i40e_ddp_old_profile_list {
I40E_FLEX_SET_FSIZE(fsize) | \
I40E_FLEX_SET_SRC_WORD(src))
-#define I40E_FLEX_PIT_GET_SRC(flex) (((flex) & \
- I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) >> \
- I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
-#define I40E_FLEX_PIT_GET_DST(flex) (((flex) & \
- I40E_PRTQF_FLX_PIT_DEST_OFF_MASK) >> \
- I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
-#define I40E_FLEX_PIT_GET_FSIZE(flex) (((flex) & \
- I40E_PRTQF_FLX_PIT_FSIZE_MASK) >> \
- I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
#define I40E_MAX_FLEX_SRC_OFFSET 0x1F
@@ -390,7 +368,6 @@ struct i40e_ddp_old_profile_list {
#define I40E_L4_GLQF_ORT_IDX 35
/* Flex PIT register index */
-#define I40E_FLEX_PIT_IDX_START_L2 0
#define I40E_FLEX_PIT_IDX_START_L3 3
#define I40E_FLEX_PIT_IDX_START_L4 6
@@ -531,7 +508,6 @@ struct i40e_pf {
#define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT(9)
#define I40E_HW_PTP_L4_CAPABLE BIT(10)
#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT(11)
-#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE BIT(12)
#define I40E_HW_HAVE_CRT_RETIMER BIT(13)
#define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT(14)
#define I40E_HW_PHY_CONTROLS_LEDS BIT(15)
@@ -567,6 +543,28 @@ struct i40e_pf {
#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
#define I40E_FLAG_RS_FEC BIT(25)
#define I40E_FLAG_BASE_R_FEC BIT(26)
+/* TOTAL_PORT_SHUTDOWN
+ * Allows the link on the NIC's port to be physically disabled.
+ * If enabled, after a link-down request from the OS no link, traffic
+ * or LED activity is possible on that port.
+ *
+ * If I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED is set,
+ * I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED must be explicitly forced to true
+ * and cannot be disabled by the system admin at that time.
+ * The two features are exclusive in terms of configuration, but they
+ * behave similarly (both allow the physical link of the port to be
+ * disabled), with the following differences:
+ * - LINK_DOWN_ON_CLOSE_ENABLED is configurable at host OS run-time and is
+ *   supported by the whole family of 7xx Intel Ethernet Controllers
+ * - TOTAL_PORT_SHUTDOWN may be enabled only before the OS loads (in BIOS),
+ *   and only if the motherboard's BIOS and the NIC's FW support it
+ * - when LINK_DOWN_ON_CLOSE_ENABLED is used, the link is brought down
+ *   by sending phy_type=0 to the NIC's FW
+ * - when TOTAL_PORT_SHUTDOWN is used, phy_type is not altered; instead
+ *   the link is brought down by clearing the I40E_AQ_PHY_ENABLE_LINK bit
+ *   in the abilities field of struct i40e_aq_set_phy_config
+ */
+#define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED BIT(27)
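+/* Editor's illustrative sketch, not part of the change itself: the coupling
+ * described above would roughly be enforced as in the snippet below; the
+ * exact enforcement point (e.g. i40e_sw_init()) is an assumption here.
+ *
+ *	if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
+ *		pf->flags |= I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED;
+ */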
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
@@ -992,7 +990,6 @@ static inline void i40e_write_fd_input_set(struct i40e_pf *pf,
int i40e_up(struct i40e_vsi *vsi);
void i40e_down(struct i40e_vsi *vsi);
extern const char i40e_driver_name[];
-extern const char i40e_driver_version_str[];
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired);
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 6a089848c857..c897a2863e4f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -541,7 +541,7 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
(aq->api_maj_ver == 1 &&
aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
- /* fall through */
+ fallthrough;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index aa5f1c0aa721..a62ddd626929 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -55,29 +55,17 @@ struct i40e_aq_desc {
*/
/* command flags and offsets*/
-#define I40E_AQ_FLAG_DD_SHIFT 0
-#define I40E_AQ_FLAG_CMP_SHIFT 1
#define I40E_AQ_FLAG_ERR_SHIFT 2
-#define I40E_AQ_FLAG_VFE_SHIFT 3
#define I40E_AQ_FLAG_LB_SHIFT 9
#define I40E_AQ_FLAG_RD_SHIFT 10
-#define I40E_AQ_FLAG_VFC_SHIFT 11
#define I40E_AQ_FLAG_BUF_SHIFT 12
#define I40E_AQ_FLAG_SI_SHIFT 13
-#define I40E_AQ_FLAG_EI_SHIFT 14
-#define I40E_AQ_FLAG_FE_SHIFT 15
-#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
/* error codes */
enum i40e_admin_queue_err {
@@ -362,13 +350,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
/* Request resource ownership (direct 0x0008)
* Release resource ownership (direct 0x0009)
*/
-#define I40E_AQ_RESOURCE_NVM 1
-#define I40E_AQ_RESOURCE_SDP 2
-#define I40E_AQ_RESOURCE_ACCESS_READ 1
-#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
-#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
-#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
-
struct i40e_aqc_request_resource {
__le16 resource_id;
__le16 access_type;
@@ -384,7 +365,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
*/
struct i40e_aqc_list_capabilites {
u8 command_flags;
-#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
u8 pf_index;
u8 reserved[2];
__le32 count;
@@ -411,8 +391,6 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
-#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
-#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008
#define I40E_AQ_CAP_ID_SRIOV 0x0012
#define I40E_AQ_CAP_ID_VF 0x0013
#define I40E_AQ_CAP_ID_VMDQ 0x0014
@@ -441,11 +419,6 @@ struct i40e_aqc_list_capabilities_element_resp {
/* Set CPPM Configuration (direct 0x0103) */
struct i40e_aqc_cppm_configuration {
__le16 command_flags;
-#define I40E_AQ_CPPM_EN_LTRC 0x0800
-#define I40E_AQ_CPPM_EN_DMCTH 0x1000
-#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
-#define I40E_AQ_CPPM_EN_HPTC 0x4000
-#define I40E_AQ_CPPM_EN_DMARC 0x8000
__le16 ttlx;
__le32 dmacr;
__le16 dmcth;
@@ -459,15 +432,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
__le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4 0x0800
-#define I40E_AQ_ARP_UNSUP_CTL 0x1000
-#define I40E_AQ_ARP_ENA 0x2000
-#define I40E_AQ_ARP_ADD_IPV4 0x4000
-#define I40E_AQ_ARP_DEL_IPV4 0x8000
__le16 table_id;
__le32 enabled_offloads;
-#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020
-#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800
__le32 ip_addr;
u8 mac_addr[6];
u8 reserved[2];
@@ -482,19 +448,6 @@ struct i40e_aqc_ns_proxy_data {
__le16 table_idx_ipv6_0;
__le16 table_idx_ipv6_1;
__le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0 0x0001
-#define I40E_AQ_NS_PROXY_DEL_0 0x0002
-#define I40E_AQ_NS_PROXY_ADD_1 0x0004
-#define I40E_AQ_NS_PROXY_DEL_1 0x0008
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400
-#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800
-#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000
u8 mac_addr_0[6];
u8 mac_addr_1[6];
u8 local_mac_addr[6];
@@ -507,7 +460,6 @@ I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
/* Manage LAA Command (0x0106) - obsolete */
struct i40e_aqc_mng_laa {
__le16 command_flags;
-#define I40E_AQ_LAA_FLAG_WR 0x8000
u8 reserved[2];
__le32 sal;
__le16 sah;
@@ -520,11 +472,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
struct i40e_aqc_mac_address_read {
__le16 command_flags;
#define I40E_AQC_LAN_ADDR_VALID 0x10
-#define I40E_AQC_SAN_ADDR_VALID 0x20
#define I40E_AQC_PORT_ADDR_VALID 0x40
-#define I40E_AQC_WOL_ADDR_VALID 0x80
-#define I40E_AQC_MC_MAG_EN_VALID 0x100
-#define I40E_AQC_ADDR_VALID_MASK 0x3F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -548,9 +496,7 @@ struct i40e_aqc_mac_address_write {
#define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
-#define I40E_AQC_WRITE_TYPE_PORT 0x8000
#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
-#define I40E_AQC_WRITE_TYPE_MASK 0xC000
__le16 mac_sah;
__le32 mac_sal;
@@ -573,22 +519,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
struct i40e_aqc_set_wol_filter {
__le16 filter_index;
-#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
-#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
-#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
- I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
-
-#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
-#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
- I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
+
__le16 cmd_flags;
-#define I40E_AQC_SET_WOL_FILTER 0x8000
-#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
-#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
-#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
__le16 valid_flags;
-#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
-#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
u8 reserved[2];
__le32 address_high;
__le32 address_low;
@@ -608,12 +541,6 @@ I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
struct i40e_aqc_get_wake_reason_completion {
u8 reserved_1[2];
__le16 wake_reason;
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
- I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
- I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
u8 reserved_2[12];
};
@@ -646,25 +573,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
struct i40e_aqc_switch_config_element_resp {
u8 element_type;
-#define I40E_AQ_SW_ELEM_TYPE_MAC 1
-#define I40E_AQ_SW_ELEM_TYPE_PF 2
-#define I40E_AQ_SW_ELEM_TYPE_VF 3
-#define I40E_AQ_SW_ELEM_TYPE_EMP 4
-#define I40E_AQ_SW_ELEM_TYPE_BMC 5
-#define I40E_AQ_SW_ELEM_TYPE_PV 16
-#define I40E_AQ_SW_ELEM_TYPE_VEB 17
-#define I40E_AQ_SW_ELEM_TYPE_PA 18
-#define I40E_AQ_SW_ELEM_TYPE_VSI 19
u8 revision;
-#define I40E_AQ_SW_ELEM_REV_1 1
__le16 seid;
__le16 uplink_seid;
__le16 downlink_seid;
u8 reserved[3];
u8 connection_type;
-#define I40E_AQ_CONN_TYPE_REGULAR 0x1
-#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
-#define I40E_AQ_CONN_TYPE_CASCADED 0x3
__le16 scheduler_id;
__le16 element_info;
};
@@ -697,12 +611,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
/* Set Port Parameters command (direct 0x0203) */
struct i40e_aqc_set_port_parameters {
__le16 command_flags;
-#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
-#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
-#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
__le16 bad_frame_vsi;
-#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
-#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
__le16 default_seid; /* reserved for command */
u8 reserved[10];
};
@@ -722,25 +631,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
/* expect an array of these structs in the response buffer */
struct i40e_aqc_switch_resource_alloc_element_resp {
u8 resource_type;
-#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
-#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
-#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
-#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
-#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
-#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
-#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
-#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
-#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
-#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
-#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
-#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
-#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
-#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
-#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
-#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
-#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
-#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
-#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
u8 reserved1;
__le16 guaranteed;
__le16 total;
@@ -756,7 +646,6 @@ struct i40e_aqc_set_switch_config {
__le16 flags;
/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
-#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
__le16 valid_flags;
/* The ethertype in switch_tag is dropped on ingress and used
* internally by the switch. Set this to zero for the default
@@ -789,17 +678,10 @@ struct i40e_aqc_set_switch_config {
*/
#define I40E_AQ_SET_SWITCH_BIT7_VALID 0x80
-#define I40E_AQ_SET_SWITCH_L4_SRC_PORT 0x40
-#define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD 0x00
#define I40E_AQ_SET_SWITCH_L4_TYPE_TCP 0x10
-#define I40E_AQ_SET_SWITCH_L4_TYPE_UDP 0x20
-#define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH 0x30
-#define I40E_AQ_SET_SWITCH_MODE_DEFAULT 0x00
-#define I40E_AQ_SET_SWITCH_MODE_L4_PORT 0x01
#define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL 0x02
-#define I40E_AQ_SET_SWITCH_MODE_TUNNEL 0x03
u8 mode;
u8 rsvd5[5];
};
@@ -834,19 +716,13 @@ struct i40e_aqc_add_get_update_vsi {
__le16 uplink_seid;
u8 connection_type;
#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
-#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
-#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
u8 reserved1;
u8 vf_id;
u8 reserved2;
__le16 vsi_flags;
-#define I40E_AQ_VSI_TYPE_SHIFT 0x0
-#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
#define I40E_AQ_VSI_TYPE_VF 0x0
#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
#define I40E_AQ_VSI_TYPE_PF 0x2
-#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
-#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
__le32 addr_high;
__le32 addr_low;
};
@@ -870,24 +746,18 @@ struct i40e_aqc_vsi_properties_data {
#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
-#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
-#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
-#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
-#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
/* switch section */
__le16 switch_id; /* 12bit id combined with flags below */
#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
-#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
u8 sw_reserved[2];
/* security section */
u8 sec_flags;
-#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
u8 sec_reserved;
@@ -899,78 +769,33 @@ struct i40e_aqc_vsi_properties_data {
#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
I40E_AQ_VSI_PVLAN_MODE_SHIFT)
#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
-#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
u8 pvlan_reserved[3];
/* ingress egress up sections */
__le32 ingress_table; /* bitmap, 3 bits per up */
-#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
-#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
-#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
-#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
-#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
-#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
-#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
-#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
-#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
__le32 egress_table; /* same defines as for ingress table */
/* cascaded PV section */
__le16 cas_pv_tag;
u8 cas_pv_flags;
-#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
- I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
-#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
-#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
-#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
-#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
-#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
u8 cas_pv_reserved;
/* queue mapping section */
__le16 mapping_flags;
#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
__le16 queue_mapping[16];
-#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
-#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
__le16 tc_mapping[8];
#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
-#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
- I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
-#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
- I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
-#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
-#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
-#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
-#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
u8 queueing_opt_reserved[3];
/* scheduler section */
@@ -995,10 +820,6 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
*/
struct i40e_aqc_add_update_pv {
__le16 command_flags;
-#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
-#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
__le16 uplink_seid;
__le16 connected_seid;
u8 reserved[10];
@@ -1009,10 +830,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
struct i40e_aqc_add_update_pv_completion {
/* reserved for update; for add also encodes error if rc == ENOSPC */
__le16 pv_seid;
-#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
-#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
u8 reserved[14];
};
@@ -1026,9 +843,6 @@ struct i40e_aqc_get_pv_params_completion {
__le16 seid;
__le16 default_stag;
__le16 pv_flags; /* same flags as add_pv */
-#define I40E_AQC_GET_PV_PV_TYPE 0x1
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
u8 reserved[8];
__le16 default_port_seid;
};
@@ -1041,12 +855,8 @@ struct i40e_aqc_add_veb {
__le16 downlink_seid;
__le16 veb_flags;
#define I40E_AQC_ADD_VEB_FLOATING 0x1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
- I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
-#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */
#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10
u8 enable_tcs;
u8 reserved[9];
@@ -1059,10 +869,6 @@ struct i40e_aqc_add_veb_completion {
__le16 switch_seid;
/* also encodes error if rc == ENOSPC; codes are the same as add_pv */
__le16 veb_seid;
-#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
-#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
__le16 statistic_index;
__le16 vebs_used;
__le16 vebs_free;
@@ -1095,9 +901,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
struct i40e_aqc_macvlan {
__le16 num_addresses;
__le16 seid[3];
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
__le32 addr_high;
__le32 addr_low;
@@ -1111,18 +914,11 @@ struct i40e_aqc_add_macvlan_element_data {
__le16 vlan_tag;
__le16 flags;
#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
-#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
-#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010
__le16 queue_number;
-#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
- I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
/* response section */
u8 match_method;
-#define I40E_AQC_MM_PERFECT_MATCH 0x01
-#define I40E_AQC_MM_HASH_MATCH 0x02
#define I40E_AQC_MM_ERR_NO_RES 0xFF
u8 reserved1[3];
};
@@ -1148,14 +944,10 @@ struct i40e_aqc_remove_macvlan_element_data {
__le16 vlan_tag;
u8 flags;
#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
-#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
-#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
u8 reserved[3];
/* reply section */
u8 error_code;
-#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
u8 reply_reserved[3];
};
@@ -1166,30 +958,8 @@ struct i40e_aqc_remove_macvlan_element_data {
struct i40e_aqc_add_remove_vlan_element_data {
__le16 vlan_tag;
u8 vlan_flags;
-/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_LOCAL 0x1
-#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
-#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
-#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
-#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
-#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
-#define I40E_AQC_VLAN_PTYPE_SHIFT 3
-#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
-#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
-#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
-#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
-#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
-/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_ALL 0x1
u8 reserved;
u8 result;
-/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
-#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
-#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
-/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
u8 reserved1[3];
};
@@ -1213,9 +983,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
__le16 seid;
-#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;
-#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF
#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
u8 reserved[8];
};
@@ -1227,11 +995,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
*/
struct i40e_aqc_add_tag {
__le16 flags;
-#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
__le16 seid;
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
__le16 tag;
__le16 queue_number;
u8 reserved[8];
@@ -1252,9 +1016,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
*/
struct i40e_aqc_remove_tag {
__le16 seid;
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
__le16 tag;
u8 reserved[12];
};
@@ -1290,9 +1051,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
/* Update S/E-Tag (direct 0x0259) */
struct i40e_aqc_update_tag {
__le16 seid;
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
__le16 old_tag;
__le16 new_tag;
u8 reserved[10];
@@ -1319,13 +1077,8 @@ struct i40e_aqc_add_remove_control_packet_filter {
__le16 flags;
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
__le16 seid;
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
__le16 queue;
u8 reserved[2];
};
@@ -1351,9 +1104,6 @@ struct i40e_aqc_add_remove_cloud_filters {
u8 num_filters;
u8 reserved;
__le16 seid;
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
u8 big_buffer_flag;
#define I40E_AQC_ADD_CLOUD_CMD_BB 1
u8 reserved2[3];
@@ -1380,9 +1130,6 @@ struct i40e_aqc_cloud_filters_element_data {
} raw_v6;
} ipaddr;
__le16 flags;
-#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
- I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
/* 0x0000 reserved */
/* 0x0001 reserved */
/* 0x0002 reserved */
@@ -1404,36 +1151,20 @@ struct i40e_aqc_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */
#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */
-#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
-#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
-#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000
__le32 tenant_id;
u8 reserved[4];
__le16 queue_number;
-#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
- I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
u8 reserved2[14];
/* response section */
u8 allocation_result;
-#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
-#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
u8 response_reserved[7];
};
@@ -1445,37 +1176,7 @@ I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data);
struct i40e_aqc_cloud_filters_element_bb {
struct i40e_aqc_cloud_filters_element_data element;
u16 general_fields[32];
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
};
I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb);
@@ -1504,11 +1205,6 @@ I40E_CHECK_STRUCT_LEN(4, i40e_filter_data);
struct i40e_aqc_replace_cloud_filters_cmd {
u8 valid_flags;
-#define I40E_AQC_REPLACE_L1_FILTER 0x0
-#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1
-#define I40E_AQC_GET_CLOUD_FILTERS 0x2
-#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4
-#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
u8 old_filter_type;
u8 new_filter_type;
u8 tr_bit;
@@ -1521,25 +1217,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd);
struct i40e_aqc_replace_cloud_filters_cmd_buf {
u8 data[32];
-/* Filter type INPUT codes*/
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7)
-
-/* Field Vector offsets */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
-/* big FLU */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
-/* big FLU */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
-
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
struct i40e_filter_data filters[8];
};
@@ -1556,8 +1233,6 @@ struct i40e_aqc_add_delete_mirror_rule {
#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
@@ -1600,8 +1275,6 @@ struct i40e_aqc_write_ddp_resp {
struct i40e_aqc_get_applied_profiles {
u8 flags;
-#define I40E_AQC_GET_DDP_GET_CONF 0x1
-#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
u8 rsv[3];
__le32 reserved;
__le32 addr_high;
@@ -1618,8 +1291,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles);
struct i40e_aqc_pfc_ignore {
u8 tc_bitmap;
u8 command_flags; /* unused on response */
-#define I40E_AQC_PFC_IGNORE_SET 0x80
-#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
u8 reserved[14];
};
@@ -1736,7 +1407,6 @@ struct i40e_aqc_configure_switching_comp_ets_data {
u8 reserved[4];
u8 tc_valid_bits;
u8 seepage;
-#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
u8 tc_strict_priority_flags;
u8 reserved1[17];
u8 tc_bw_share_credits[8];
@@ -1977,40 +1647,18 @@ struct i40e_aq_get_phy_abilities_resp {
u8 abilities;
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
-#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_LINK_ENABLED 0x08
-#define I40E_AQ_PHY_AN_ENABLED 0x10
-#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
-#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
-#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
__le16 eee_capability;
-#define I40E_AQ_EEE_100BASE_TX 0x0002
-#define I40E_AQ_EEE_1000BASE_T 0x0004
-#define I40E_AQ_EEE_10GBASE_T 0x0008
-#define I40E_AQ_EEE_1000BASE_KX 0x0010
-#define I40E_AQ_EEE_10GBASE_KX4 0x0020
-#define I40E_AQ_EEE_10GBASE_KR 0x0040
__le32 eeer_val;
u8 d3_lpan;
-#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
u8 phy_type_ext;
#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
-#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
-#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
-#define I40E_AQ_PHY_TYPE_EXT_2_5GBASE_T 0x40
-#define I40E_AQ_PHY_TYPE_EXT_5GBASE_T 0x80
u8 fec_cfg_curr_mod_ext_info;
-#define I40E_AQ_ENABLE_FEC_KR 0x01
-#define I40E_AQ_ENABLE_FEC_RS 0x02
#define I40E_AQ_REQUEST_FEC_KR 0x04
#define I40E_AQ_REQUEST_FEC_RS 0x08
#define I40E_AQ_ENABLE_FEC_AUTO 0x10
-#define I40E_AQ_FEC
-#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
-#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
u8 ext_comp_code;
u8 phy_id[4];
@@ -2056,21 +1704,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
struct i40e_aq_set_mac_config {
__le16 max_frame_size;
u8 params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
-#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80
u8 tx_timer_priority; /* bitmap */
__le16 tx_timer_value;
__le16 fc_refresh_threshold;
@@ -2092,8 +1725,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
/* Get Link Status cmd & response data structure (direct 0x0607) */
struct i40e_aqc_get_link_status {
__le16 command_flags; /* only field set on command */
-#define I40E_AQ_LSE_MASK 0x3
-#define I40E_AQ_LSE_NOP 0x0
#define I40E_AQ_LSE_DISABLE 0x2
#define I40E_AQ_LSE_ENABLE 0x3
/* only response uses this flag */
@@ -2102,44 +1733,16 @@ struct i40e_aqc_get_link_status {
u8 link_speed; /* i40e_aq_link_speed */
u8 link_info;
#define I40E_AQ_LINK_UP 0x01 /* obsolete */
-#define I40E_AQ_LINK_UP_FUNCTION 0x01
-#define I40E_AQ_LINK_FAULT 0x02
-#define I40E_AQ_LINK_FAULT_TX 0x04
-#define I40E_AQ_LINK_FAULT_RX 0x08
-#define I40E_AQ_LINK_FAULT_REMOTE 0x10
-#define I40E_AQ_LINK_UP_PORT 0x20
#define I40E_AQ_MEDIA_AVAILABLE 0x40
-#define I40E_AQ_SIGNAL_DETECT 0x80
u8 an_info;
#define I40E_AQ_AN_COMPLETED 0x01
-#define I40E_AQ_LP_AN_ABILITY 0x02
-#define I40E_AQ_PD_FAULT 0x04
-#define I40E_AQ_FEC_EN 0x08
-#define I40E_AQ_PHY_LOW_POWER 0x10
#define I40E_AQ_LINK_PAUSE_TX 0x20
#define I40E_AQ_LINK_PAUSE_RX 0x40
#define I40E_AQ_QUALIFIED_MODULE 0x80
u8 ext_info;
-#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
-#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
-#define I40E_AQ_LINK_TX_SHIFT 0x02
-#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
-#define I40E_AQ_LINK_TX_ACTIVE 0x00
-#define I40E_AQ_LINK_TX_DRAINED 0x01
-#define I40E_AQ_LINK_TX_FLUSHED 0x03
-#define I40E_AQ_LINK_FORCED_40G 0x10
-/* 25G Error Codes */
-#define I40E_AQ_25G_NO_ERR 0X00
-#define I40E_AQ_25G_NOT_PRESENT 0X01
-#define I40E_AQ_25G_NVM_CRC_ERR 0X02
-#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03
-#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
-#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
/* Since firmware API 1.7 loopback field keeps power class info as well */
#define I40E_AQ_LOOPBACK_MASK 0x07
-#define I40E_AQ_PWR_CLASS_SHIFT_LB 6
-#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
__le16 max_frame_size;
u8 config;
#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
@@ -2149,11 +1752,6 @@ struct i40e_aqc_get_link_status {
union {
struct {
u8 power_desc;
-#define I40E_AQ_LINK_POWER_CLASS_1 0x00
-#define I40E_AQ_LINK_POWER_CLASS_2 0x01
-#define I40E_AQ_LINK_POWER_CLASS_3 0x02
-#define I40E_AQ_LINK_POWER_CLASS_4 0x03
-#define I40E_AQ_PWR_CLASS_MASK 0x03
u8 reserved[4];
};
struct {
@@ -2171,13 +1769,7 @@ struct i40e_aqc_set_phy_int_mask {
__le16 event_mask;
#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
#define I40E_AQ_EVENT_MEDIA_NA 0x0004
-#define I40E_AQ_EVENT_LINK_FAULT 0x0008
-#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
-#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
-#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
-#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
-#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
u8 reserved1[6];
};
@@ -2209,13 +1801,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
/* Set PHY Debug command (0x0622) */
struct i40e_aqc_set_phy_debug {
u8 command_flags;
-#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
- I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
/* Disable link manageability on a single port */
#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
/* Disable link manageability on all ports */
@@ -2247,7 +1832,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
/* Get PHY Register command (0x0629) */
struct i40e_aqc_phy_register_access {
u8 phy_interface;
-#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
u8 dev_address;
@@ -2274,9 +1858,7 @@ struct i40e_aqc_nvm_update {
#define I40E_AQ_NVM_LAST_CMD 0x01
#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20
#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
u8 module_pointer;
@@ -2291,9 +1873,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
/* NVM Config Read (indirect 0x0704) */
struct i40e_aqc_nvm_config_read {
__le16 cmd_flags;
-#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
-#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
-#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
__le16 element_count;
__le16 element_id; /* Feature/field ID */
__le16 element_id_msw; /* MSWord of field ID */
@@ -2315,16 +1894,8 @@ struct i40e_aqc_nvm_config_write {
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
/* Used for 0x0704 as well as for 0x0705 commands */
-#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
-#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
- BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
-#define I40E_AQ_ANVM_FEATURE 0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT)
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
-#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
-#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
-#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
__le16 feature_options;
__le16 feature_selection;
};
@@ -2344,7 +1915,6 @@ I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
* no command data struct used
*/
struct i40e_aqc_nvm_oem_post_update {
-#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01
u8 sel_data;
u8 reserved[7];
};
@@ -2366,9 +1936,6 @@ I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
*/
struct i40e_aqc_thermal_sensor {
u8 sensor_action;
-#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0
-#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1
-#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2
u8 reserved[7];
__le32 addr_high;
__le32 addr_low;
@@ -2421,10 +1988,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
*/
struct i40e_aqc_alternate_write_done {
__le16 cmd_flags;
-#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
-#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
-#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
-#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
u8 reserved[14];
};
@@ -2433,8 +1996,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
/* Set OEM mode (direct 0x0905) */
struct i40e_aqc_alternate_set_mode {
__le32 mode;
-#define I40E_AQ_ALTERNATE_MODE_NONE 0
-#define I40E_AQ_ALTERNATE_MODE_OEM 1
u8 reserved[12];
};
@@ -2460,13 +2021,9 @@ struct i40e_aqc_lldp_get_mib {
#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
#define I40E_AQ_LLDP_MIB_LOCAL 0x0
#define I40E_AQ_LLDP_MIB_REMOTE 0x1
-#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
-#define I40E_AQ_LLDP_TX_SHIFT 0x4
-#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
/* TX pause flags use I40E_AQ_LINK_TX_* above */
__le16 local_len;
__le16 remote_len;
@@ -2482,7 +2039,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
*/
struct i40e_aqc_lldp_update_mib {
u8 command;
-#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
u8 reserved[7];
__le32 addr_high;
@@ -2521,7 +2077,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
/* Stop LLDP (direct 0x0A05) */
struct i40e_aqc_lldp_stop {
u8 command;
-#define I40E_AQ_LLDP_AGENT_STOP 0x0
#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
#define I40E_AQ_LLDP_AGENT_STOP_PERSIST 0x2
u8 reserved[15];
@@ -2627,13 +2182,6 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
* Used to replace the local MIB of a given LLDP agent. e.g. DCBx
*/
struct i40e_aqc_lldp_set_local_mib {
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK BIT(SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
-#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
-#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
-#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK \
- BIT(SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
-#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
u8 type;
u8 reserved0;
__le16 length;
@@ -2648,9 +2196,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
* Used for stopping/starting specific LLDP agent. e.g. DCBx
*/
struct i40e_aqc_lldp_stop_start_specific_agent {
-#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
-#define I40E_AQC_START_SPECIFIC_AGENT_MASK \
- BIT(I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
u8 command;
u8 reserved[15];
};
@@ -2660,7 +2205,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
/* Restore LLDP Agent factory settings (direct 0x0A0A) */
struct i40e_aqc_lldp_restore {
u8 command;
-#define I40E_AQ_LLDP_AGENT_RESTORE_NOT 0x0
#define I40E_AQ_LLDP_AGENT_RESTORE 0x1
u8 reserved[15];
};
@@ -2674,8 +2218,6 @@ struct i40e_aqc_add_udp_tunnel {
u8 protocol_type;
#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
-#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
-#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11
u8 reserved1[10];
};
@@ -2685,8 +2227,6 @@ struct i40e_aqc_add_udp_tunnel_completion {
__le16 udp_port;
u8 filter_entry_index;
u8 multiple_pfs;
-#define I40E_AQC_SINGLE_PF 0x0
-#define I40E_AQC_MULTIPLE_PFS 0x1
u8 total_filters;
u8 reserved[11];
};
@@ -2759,16 +2299,7 @@ struct i40e_aqc_tunnel_key_structure {
u8 key1_len; /* 0 to 15 */
u8 key2_len; /* 0 to 15 */
u8 flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
-/* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
u8 network_key_index;
-#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
-#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
-#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
-#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
u8 reserved[10];
};
@@ -2777,9 +2308,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
/* OEM mode commands (direct 0xFE0x) */
struct i40e_aqc_oem_param_change {
__le32 param_type;
-#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
-#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
-#define I40E_AQ_OEM_PARAM_MAC 2
__le32 param_value1;
__le16 param_value2;
u8 reserved[6];
@@ -2789,8 +2317,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
struct i40e_aqc_oem_state_change {
__le32 state;
-#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
-#define I40E_AQ_OEM_STATE_LINK_UP 0x1
u8 reserved[12];
};
@@ -2826,14 +2352,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
struct i40e_acq_set_test_mode {
u8 mode;
-#define I40E_AQ_TEST_PARTIAL 0
-#define I40E_AQ_TEST_FULL 1
-#define I40E_AQ_TEST_NVM 2
u8 reserved[3];
u8 command;
-#define I40E_AQ_TEST_OPEN 0
-#define I40E_AQ_TEST_CLOSE 1
-#define I40E_AQ_TEST_INC 2
u8 reserved2[3];
__le32 address_high;
__le32 address_low;
@@ -2874,20 +2394,6 @@ struct i40e_aqc_debug_modify_reg {
I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
/* dump internal data (0xFF08, indirect) */
-
-#define I40E_AQ_CLUSTER_ID_AUX 0
-#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
-#define I40E_AQ_CLUSTER_ID_TXSCHED 2
-#define I40E_AQ_CLUSTER_ID_HMC 3
-#define I40E_AQ_CLUSTER_ID_MAC0 4
-#define I40E_AQ_CLUSTER_ID_MAC1 5
-#define I40E_AQ_CLUSTER_ID_MAC2 6
-#define I40E_AQ_CLUSTER_ID_MAC3 7
-#define I40E_AQ_CLUSTER_ID_DCB 8
-#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
-#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
-#define I40E_AQ_CLUSTER_ID_ALTRAM 11
-
struct i40e_aqc_debug_dump_internals {
u8 cluster_id;
u8 table_id;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index e81530ca08d0..befd3018183f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -3,10 +3,10 @@
#include <linux/list.h>
#include <linux/errno.h>
+#include <linux/net/intel/i40e_client.h>
#include "i40e.h"
#include "i40e_prototype.h"
-#include "i40e_client.h"
static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR;
static struct i40e_client *registered_client;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
deleted file mode 100644
index 72994baf4941..000000000000
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ /dev/null
@@ -1,203 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_CLIENT_H_
-#define _I40E_CLIENT_H_
-
-#define I40E_CLIENT_STR_LENGTH 10
-
-/* Client interface version should be updated anytime there is a change in the
- * existing APIs or data structures.
- */
-#define I40E_CLIENT_VERSION_MAJOR 0
-#define I40E_CLIENT_VERSION_MINOR 01
-#define I40E_CLIENT_VERSION_BUILD 00
-#define I40E_CLIENT_VERSION_STR \
- __stringify(I40E_CLIENT_VERSION_MAJOR) "." \
- __stringify(I40E_CLIENT_VERSION_MINOR) "." \
- __stringify(I40E_CLIENT_VERSION_BUILD)
-
-struct i40e_client_version {
- u8 major;
- u8 minor;
- u8 build;
- u8 rsvd;
-};
-
-enum i40e_client_state {
- __I40E_CLIENT_NULL,
- __I40E_CLIENT_REGISTERED
-};
-
-enum i40e_client_instance_state {
- __I40E_CLIENT_INSTANCE_NONE,
- __I40E_CLIENT_INSTANCE_OPENED,
-};
-
-struct i40e_ops;
-struct i40e_client;
-
-/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
- * In order for us to keep the interface simple, SW will define a
- * unique type value for AEQ.
- */
-#define I40E_QUEUE_TYPE_PE_AEQ 0x80
-#define I40E_QUEUE_INVALID_IDX 0xFFFF
-
-struct i40e_qv_info {
- u32 v_idx; /* msix_vector */
- u16 ceq_idx;
- u16 aeq_idx;
- u8 itr_idx;
-};
-
-struct i40e_qvlist_info {
- u32 num_vectors;
- struct i40e_qv_info qv_info[1];
-};
-
-#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
-
-/* set of LAN parameters useful for clients managed by LAN */
-
-/* Struct to hold per priority info */
-struct i40e_prio_qos_params {
- u16 qs_handle; /* qs handle for prio */
- u8 tc; /* TC mapped to prio */
- u8 reserved;
-};
-
-#define I40E_CLIENT_MAX_USER_PRIORITY 8
-/* Struct to hold Client QoS */
-struct i40e_qos_params {
- struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
-};
-
-struct i40e_params {
- struct i40e_qos_params qos;
- u16 mtu;
-};
-
-/* Structure to hold Lan device info for a client device */
-struct i40e_info {
- struct i40e_client_version version;
- u8 lanmac[6];
- struct net_device *netdev;
- struct pci_dev *pcidev;
- u8 __iomem *hw_addr;
- u8 fid; /* function id, PF id or VF id */
-#define I40E_CLIENT_FTYPE_PF 0
-#define I40E_CLIENT_FTYPE_VF 1
- u8 ftype; /* function type, PF or VF */
- void *pf;
-
- /* All L2 params that could change during the life span of the PF
- * and needs to be communicated to the client when they change
- */
- struct i40e_qvlist_info *qvlist_info;
- struct i40e_params params;
- struct i40e_ops *ops;
-
- u16 msix_count; /* number of msix vectors*/
- /* Array down below will be dynamically allocated based on msix_count */
- struct msix_entry *msix_entries;
- u16 itr_index; /* Which ITR index the PE driver is suppose to use */
- u16 fw_maj_ver; /* firmware major version */
- u16 fw_min_ver; /* firmware minor version */
- u32 fw_build; /* firmware build number */
-};
-
-#define I40E_CLIENT_RESET_LEVEL_PF 1
-#define I40E_CLIENT_RESET_LEVEL_CORE 2
-#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1)
-
-struct i40e_ops {
- /* setup_q_vector_list enables queues with a particular vector */
- int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
- struct i40e_qvlist_info *qv_info);
-
- int (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
- u32 vf_id, u8 *msg, u16 len);
-
- /* If the PE Engine is unresponsive, RDMA driver can request a reset.
- * The level helps determine the level of reset being requested.
- */
- void (*request_reset)(struct i40e_info *ldev,
- struct i40e_client *client, u32 level);
-
- /* API for the RDMA driver to set certain VSI flags that control
- * PE Engine.
- */
- int (*update_vsi_ctxt)(struct i40e_info *ldev,
- struct i40e_client *client,
- bool is_vf, u32 vf_id,
- u32 flag, u32 valid_flag);
-};
-
-struct i40e_client_ops {
- /* Should be called from register_client() or whenever PF is ready
- * to create a specific client instance.
- */
- int (*open)(struct i40e_info *ldev, struct i40e_client *client);
-
- /* Should be called when netdev is unavailable or when unregister
- * call comes in. If the close is happenening due to a reset being
- * triggered set the reset bit to true.
- */
- void (*close)(struct i40e_info *ldev, struct i40e_client *client,
- bool reset);
-
- /* called when some l2 managed parameters changes - mtu */
- void (*l2_param_change)(struct i40e_info *ldev,
- struct i40e_client *client,
- struct i40e_params *params);
-
- int (*virtchnl_receive)(struct i40e_info *ldev,
- struct i40e_client *client, u32 vf_id,
- u8 *msg, u16 len);
-
- /* called when a VF is reset by the PF */
- void (*vf_reset)(struct i40e_info *ldev,
- struct i40e_client *client, u32 vf_id);
-
- /* called when the number of VFs changes */
- void (*vf_enable)(struct i40e_info *ldev,
- struct i40e_client *client, u32 num_vfs);
-
- /* returns true if VF is capable of specified offload */
- int (*vf_capable)(struct i40e_info *ldev,
- struct i40e_client *client, u32 vf_id);
-};
-
-/* Client device */
-struct i40e_client_instance {
- struct list_head list;
- struct i40e_info lan_info;
- struct i40e_client *client;
- unsigned long state;
-};
-
-struct i40e_client {
- struct list_head list; /* list of registered clients */
- char name[I40E_CLIENT_STR_LENGTH];
- struct i40e_client_version version;
- unsigned long state; /* client state */
- atomic_t ref_cnt; /* Count of all the client devices of this kind */
- u32 flags;
-#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
-#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
- u8 type;
-#define I40E_CLIENT_IWARP 0
- const struct i40e_client_ops *ops; /* client ops provided by the client */
-};
-
-static inline bool i40e_client_is_registered(struct i40e_client *client)
-{
- return test_bit(__I40E_CLIENT_REGISTERED, &client->state);
-}
-
-/* used by clients */
-int i40e_register_client(struct i40e_client *client);
-int i40e_unregister_client(struct i40e_client *client);
-
-#endif /* _I40E_CLIENT_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 45b90eb11adb..afad5e9f80e0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -27,6 +27,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_10G_BASE_T_BC:
@@ -1455,10 +1456,6 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
return gpio_val;
}
-#define I40E_COMBINED_ACTIVITY 0xA
-#define I40E_FILTER_ACTIVITY 0xE
-#define I40E_LINK_ACTIVITY 0xC
-#define I40E_MAC_ACTIVITY 0xD
#define I40E_FW_LED BIT(4)
#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
@@ -4910,6 +4907,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
value);
break;
+ case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_10G_BASE_T_BC:
@@ -4947,6 +4945,7 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
value);
break;
+ case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_10G_BASE_T_BC:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index ba86ad833bee..2b1a2e81ac73 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -6,10 +6,8 @@
#include "i40e_type.h"
-#define I40E_DCBX_STATUS_NOT_STARTED 0
#define I40E_DCBX_STATUS_IN_PROGRESS 1
#define I40E_DCBX_STATUS_DONE 2
-#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3
#define I40E_DCBX_STATUS_DISABLED 7
#define I40E_TLV_TYPE_END 0
@@ -24,7 +22,6 @@
#define I40E_CEE_DCBX_OUI 0x001b21
#define I40E_CEE_DCBX_TYPE 2
-#define I40E_CEE_SUBTYPE_CTRL 1
#define I40E_CEE_SUBTYPE_PG_CFG 2
#define I40E_CEE_SUBTYPE_PFC_CFG 3
#define I40E_CEE_SUBTYPE_APP_PRI 4
@@ -105,9 +102,7 @@ struct i40e_cee_ctrl_tlv {
struct i40e_cee_feat_tlv {
struct i40e_cee_tlv_hdr hdr;
u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
-#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80
#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40
-#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20
u8 subtype;
u8 tlvinfo[1];
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 99ea543dd245..d3ad2e3aa838 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -10,6 +10,12 @@
static struct dentry *i40e_dbg_root;
+enum ring_type {
+ RING_TYPE_RX,
+ RING_TYPE_TX,
+ RING_TYPE_XDP
+};
+
/**
* i40e_dbg_find_vsi - searches for the vsi with the given seid
* @pf: the PF structure to search for the vsi
@@ -319,6 +325,47 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
i, tx_ring->itr_setting,
ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed");
}
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]);
+
+ if (!xdp_ring)
+ continue;
+
+ dev_info(&pf->pdev->dev,
+ " xdp_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
+ i, *xdp_ring->state,
+ xdp_ring->queue_index,
+ xdp_ring->reg_idx);
+ dev_info(&pf->pdev->dev,
+ " xdp_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i,
+ xdp_ring->next_to_use,
+ xdp_ring->next_to_clean,
+ xdp_ring->ring_active);
+ dev_info(&pf->pdev->dev,
+ " xdp_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+ i, xdp_ring->stats.packets,
+ xdp_ring->stats.bytes,
+ xdp_ring->tx_stats.restart_queue);
+ dev_info(&pf->pdev->dev,
+ " xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+ i,
+ xdp_ring->tx_stats.tx_busy,
+ xdp_ring->tx_stats.tx_done_old);
+ dev_info(&pf->pdev->dev,
+ " xdp_rings[%i]: size = %i\n",
+ i, xdp_ring->size);
+ dev_info(&pf->pdev->dev,
+ " xdp_rings[%i]: DCB tc = %d\n",
+ i, xdp_ring->dcb_tc);
+ dev_info(&pf->pdev->dev,
+ " xdp_rings[%i]: itr_setting = %d (%s)\n",
+ i, xdp_ring->itr_setting,
+ ITR_IS_DYNAMIC(xdp_ring->itr_setting) ?
+ "dynamic" : "fixed");
+ }
+ }
rcu_read_unlock();
dev_info(&pf->pdev->dev,
" work_limit = %d\n",
@@ -489,11 +536,12 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
* @ring_id: ring id entered by user
* @desc_n: descriptor number entered by user
* @pf: the i40e_pf created in command write
- * @is_rx_ring: true if rx, false if tx
+ * @type: enum describing whether ring is RX, TX or XDP
**/
static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
- struct i40e_pf *pf, bool is_rx_ring)
+ struct i40e_pf *pf, enum ring_type type)
{
+ bool is_rx_ring = type == RING_TYPE_RX;
struct i40e_tx_desc *txd;
union i40e_rx_desc *rxd;
struct i40e_ring *ring;
@@ -505,6 +553,10 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
return;
}
+ if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
+ dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
+ return;
+ }
if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
return;
@@ -516,15 +568,32 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
return;
}
- ring = kmemdup(is_rx_ring
- ? vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id],
- sizeof(*ring), GFP_KERNEL);
+ switch (type) {
+ case RING_TYPE_RX:
+ ring = kmemdup(vsi->rx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
+ break;
+ case RING_TYPE_TX:
+ ring = kmemdup(vsi->tx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
+ break;
+ case RING_TYPE_XDP:
+ ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
+ break;
+ }
if (!ring)
return;
if (cnt == 2) {
- dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
- vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
+ switch (type) {
+ case RING_TYPE_RX:
+ dev_info(&pf->pdev->dev, "VSI = %02i Rx ring = %02i\n", vsi_seid, ring_id);
+ break;
+ case RING_TYPE_TX:
+ dev_info(&pf->pdev->dev, "VSI = %02i Tx ring = %02i\n", vsi_seid, ring_id);
+ break;
+ case RING_TYPE_XDP:
+ dev_info(&pf->pdev->dev, "VSI = %02i XDP ring = %02i\n", vsi_seid, ring_id);
+ break;
+ }
for (i = 0; i < ring->count; i++) {
if (!is_rx_ring) {
txd = I40E_TX_DESC(ring, i);
@@ -562,7 +631,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
rxd->read.rsvd1, rxd->read.rsvd2);
}
} else {
- dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n");
}
out:
@@ -688,7 +757,6 @@ static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
i40e_dbg_dump_vf(pf, i);
}
-#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
/**
* i40e_dbg_command_write - write into command datum
* @filp: the opened file
@@ -920,13 +988,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
cnt = sscanf(&cmd_buf[12], "%i %i %i",
&vsi_seid, &ring_id, &desc_n);
i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
- desc_n, pf, true);
+ desc_n, pf, RING_TYPE_RX);
} else if (strncmp(&cmd_buf[10], "tx", 2)
== 0) {
cnt = sscanf(&cmd_buf[12], "%i %i %i",
&vsi_seid, &ring_id, &desc_n);
i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
- desc_n, pf, false);
+ desc_n, pf, RING_TYPE_TX);
+ } else if (strncmp(&cmd_buf[10], "xdp", 3)
+ == 0) {
+ cnt = sscanf(&cmd_buf[13], "%i %i %i",
+ &vsi_seid, &ring_id, &desc_n);
+ i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
+ desc_n, pf, RING_TYPE_XDP);
} else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
i40e_dbg_dump_aq_desc(pf);
} else {
@@ -934,6 +1008,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
dev_info(&pf->pdev->dev,
"dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev,
+ "dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
dev_info(&pf->pdev->dev, "dump desc aq\n");
}
} else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
@@ -1104,7 +1180,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
} else {
dev_info(&pf->pdev->dev,
- "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
+ "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>], dump desc xdp <vsi_seid> <ring_id> [<desc_n>],\n");
dev_info(&pf->pdev->dev, "dump switch\n");
dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
dev_info(&pf->pdev->dev, "dump reset stats\n");
@@ -1520,6 +1596,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, " dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
dev_info(&pf->pdev->dev, " dump desc aq\n");
dev_info(&pf->pdev->dev, " dump reset stats\n");
dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
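Context for the debugfs hunks above: the "dump desc xdp" command parallels the existing rx/tx forms. Assuming the usual i40e debugfs layout (a per-device directory under /sys/kernel/debug/i40e/ containing a writable "command" file), a hypothetical invocation such as echo "dump desc xdp 390 0" > /sys/kernel/debug/i40e/0000:02:00.0/command would dump every descriptor of XDP ring 0 on VSI 390 (the cnt == 2 branch), while appending a third number selects a single descriptor; the VSI seid and PCI address here are illustrative only.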
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index bf15a868292f..1bcb0ec0f0c0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -23,8 +23,10 @@
#define I40E_DEV_ID_10G_BASE_T_BC 0x15FF
#define I40E_DEV_ID_10G_B 0x104F
#define I40E_DEV_ID_10G_SFP 0x104E
+#define I40E_DEV_ID_5G_BASE_T_BC 0x101F
#define I40E_IS_X710TL_DEVICE(d) \
- ((d) == I40E_DEV_ID_10G_BASE_T_BC)
+ (((d) == I40E_DEV_ID_5G_BASE_T_BC) || \
+ ((d) == I40E_DEV_ID_10G_BASE_T_BC))
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
#define I40E_DEV_ID_SFP_X722 0x37D0
@@ -32,8 +34,5 @@
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
-#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
- (d) == I40E_DEV_ID_QSFP_B || \
- (d) == I40E_DEV_ID_QSFP_C)
#endif /* _I40E_DEVIDS_H_ */
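Note on the i40e_devids.h hunk above: with the new 0x101F device ID added, I40E_IS_X710TL_DEVICE() now matches both the 5G and the 10G BASE-T BC parts, so any existing caller that gates behaviour on the X710-TL family automatically covers the 5GBASE-T device as well; the unused i40e_is_40G_device() helper is dropped in the same change.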
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index aa8026b1eb81..825c104ecba1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -428,6 +428,8 @@ struct i40e_priv_flags {
static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
/* NOTE: MFP setting cannot be changed */
I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1),
+ I40E_PRIV_FLAG("total-port-shutdown",
+ I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED, 1),
I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
@@ -1893,8 +1895,6 @@ static void i40e_get_drvinfo(struct net_device *netdev,
struct i40e_pf *pf = vsi->back;
strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, i40e_driver_version_str,
- sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
@@ -2072,6 +2072,9 @@ static int i40e_set_ringparam(struct net_device *netdev,
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err)
goto rx_unwind;
+ err = i40e_alloc_rx_bi(&rx_rings[i]);
+ if (err)
+ goto rx_unwind;
/* now allocate the Rx buffers to make sure the OS
* has enough memory, any failure here means abort
@@ -4101,7 +4104,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
switch (fsp->flow_type & ~FLOW_EXT) {
case SCTP_V4_FLOW:
new_mask &= ~I40E_VERIFY_TAG_MASK;
- /* Fall through */
+ fallthrough;
case TCP_V4_FLOW:
case UDP_V4_FLOW:
tcp_ip4_spec = &fsp->m_u.tcp_ip4_spec;
@@ -5006,6 +5009,13 @@ flags_complete:
dev_warn(&pf->pdev->dev, "Cannot change FEC config\n");
}
+ if ((changed_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
+ (orig_flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) {
+ dev_err(&pf->pdev->dev,
+ "Setting link-down-on-close not supported on this port (because total-port-shutdown is enabled)\n");
+ return -EOPNOTSUPP;
+ }
+
if ((changed_flags & new_flags &
I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
(new_flags & I40E_FLAG_MFP_ENABLED))
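The two ethtool hunks above register "total-port-shutdown" as a read-only private flag (the trailing 1, as for MFP) and reject changes to "link-down-on-close" while it is set. In practice, and with a hypothetical interface name, ethtool --show-priv-flags eth0 would report total-port-shutdown as on, and ethtool --set-priv-flags eth0 link-down-on-close off would fail with EOPNOTSUPP on such a port, matching the dev_err() message added above.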
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 1c78de838857..3113792afaff 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -14,7 +14,6 @@ struct i40e_hw;
#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
#define I40E_HMC_PAGED_BP_SIZE 4096
#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
-#define I40E_FIRST_VF_FPM_ID 16
struct i40e_hmc_obj_info {
u64 base; /* base addr in FPM */
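A recurring pattern in the i40e_main.c diff that follows: ring pointers in the VSI arrays are now published with WRITE_ONCE() and consumed with READ_ONCE(), and every stats reader skips queues whose pointer is still NULL, since rings may not yet be allocated while statistics are being aggregated. Below is a minimal kernel-style sketch of that pairing, using hypothetical structure and function names rather than the driver's own:

/* Kernel-style sketch only: hypothetical types and names, not i40e's own. */
#include <linux/compiler.h>     /* READ_ONCE() / WRITE_ONCE() */
#include <linux/types.h>

struct demo_ring { u64 packets; u64 bytes; };
struct demo_vsi  { struct demo_ring *rx_rings[64]; };

/* Writer: publish a freshly allocated ring, or clear it on teardown. */
static void demo_publish_ring(struct demo_vsi *v, int q, struct demo_ring *r)
{
        WRITE_ONCE(v->rx_rings[q], r);
}

/* Reader: aggregate counters, tolerating rings that do not exist yet. */
static void demo_sum_stats(struct demo_vsi *v, int nq, u64 *pkts, u64 *bytes)
{
        int q;

        for (q = 0; q < nq; q++) {
                struct demo_ring *r = READ_ONCE(v->rx_rings[q]);

                if (!r)
                        continue;
                *pkts  += r->packets;
                *bytes += r->bytes;
        }
}

The driver itself additionally wraps each counter read in a u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop, as the hunks below show.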
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 5d807c8004f8..d8315811cbdf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -5,6 +5,7 @@
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>
+#include <generated/utsrelease.h>
/* Local includes */
#include "i40e.h"
@@ -23,15 +24,6 @@ const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
"Intel(R) Ethernet Connection XL710 Network Driver";
-#define DRV_KERN "-k"
-
-#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 8
-#define DRV_VERSION_BUILD 20
-#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
- __stringify(DRV_VERSION_MINOR) "." \
- __stringify(DRV_VERSION_BUILD) DRV_KERN
-const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
/* a bit of forward declarations */
@@ -54,7 +46,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
enum i40e_admin_queue_opc list_type);
-
+static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
/* i40e_pci_tbl - PCI Device ID Table
*
@@ -101,7 +93,6 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
static struct workqueue_struct *i40e_wq;
@@ -439,11 +430,15 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
i40e_get_netdev_stats_struct_tx(ring, stats);
if (i40e_enabled_xdp_vsi(vsi)) {
- ring++;
+ ring = READ_ONCE(vsi->xdp_rings[i]);
+ if (!ring)
+ continue;
i40e_get_netdev_stats_struct_tx(ring, stats);
}
- ring++;
+ ring = READ_ONCE(vsi->rx_rings[i]);
+ if (!ring)
+ continue;
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
packets = ring->stats.packets;
@@ -787,6 +782,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
for (q = 0; q < vsi->num_queue_pairs; q++) {
/* locate Tx ring */
p = READ_ONCE(vsi->tx_rings[q]);
+ if (!p)
+ continue;
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
@@ -800,8 +797,11 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
- /* Rx queue is part of the same block as Tx queue */
- p = &p[1];
+ /* locate Rx ring */
+ p = READ_ONCE(vsi->rx_rings[q]);
+ if (!p)
+ continue;
+
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
packets = p->stats.packets;
@@ -811,6 +811,25 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rx_p += packets;
rx_buf += p->rx_stats.alloc_buff_failed;
rx_page += p->rx_stats.alloc_page_failed;
+
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ /* locate XDP ring */
+ p = READ_ONCE(vsi->xdp_rings[q]);
+ if (!p)
+ continue;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ packets = p->stats.packets;
+ bytes = p->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ tx_b += bytes;
+ tx_p += packets;
+ tx_restart += p->tx_stats.restart_queue;
+ tx_busy += p->tx_stats.tx_busy;
+ tx_linearize += p->tx_stats.tx_linearize;
+ tx_force_wb += p->tx_stats.tx_force_wb;
+ }
}
rcu_read_unlock();
vsi->tx_restart = tx_restart;
@@ -1817,7 +1836,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
num_tc_qps);
break;
}
- /* fall through */
+ fallthrough;
case I40E_VSI_FDIR:
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
@@ -6492,8 +6511,7 @@ out:
return err;
}
#endif /* CONFIG_I40E_DCB */
-#define SPEED_SIZE 14
-#define FC_SIZE 8
+
/**
* i40e_print_link_message - print link up or down
* @vsi: the VSI for which link needs a message
@@ -6681,21 +6699,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
}
/**
- * i40e_up - Bring the connection back up after being down
- * @vsi: the VSI being configured
- **/
-int i40e_up(struct i40e_vsi *vsi)
-{
- int err;
-
- err = i40e_vsi_configure(vsi);
- if (!err)
- err = i40e_up_complete(vsi);
-
- return err;
-}
-
-/**
* i40e_force_link_state - Force the link status
* @pf: board private structure
* @is_up: whether the link state should be forced up or down
@@ -6704,6 +6707,7 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
{
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_aq_set_phy_config config = {0};
+ bool non_zero_phy_type = is_up;
struct i40e_hw *hw = &pf->hw;
i40e_status err;
u64 mask;
@@ -6739,8 +6743,11 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
/* If link needs to go up, but was not forced to go down,
* and its speed values are OK, no need for a flap
+ * if non_zero_phy_type was set, we still need to force the link up
*/
- if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
+ if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
+ non_zero_phy_type = true;
+ else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
return I40E_SUCCESS;
/* To force link we need to set bits for all supported PHY types,
@@ -6748,10 +6755,18 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
* across two fields.
*/
mask = I40E_PHY_TYPES_BITMASK;
- config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
- config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
+ config.phy_type =
+ non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
+ config.phy_type_ext =
+ non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
/* Copy the old settings, except of phy_type */
config.abilities = abilities.abilities;
+ if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
+ if (is_up)
+ config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
+ else
+ config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
+ }
if (abilities.link_speed != 0)
config.link_speed = abilities.link_speed;
else
@@ -6782,12 +6797,32 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
i40e_update_link_info(hw);
}
- i40e_aq_set_link_restart_an(hw, true, NULL);
+ i40e_aq_set_link_restart_an(hw, is_up, NULL);
return I40E_SUCCESS;
}
/**
+ * i40e_up - Bring the connection back up after being down
+ * @vsi: the VSI being configured
+ **/
+int i40e_up(struct i40e_vsi *vsi)
+{
+ int err;
+
+ if (vsi->type == I40E_VSI_MAIN &&
+ (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
+ vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
+ i40e_force_link_state(vsi->back, true);
+
+ err = i40e_vsi_configure(vsi);
+ if (!err)
+ err = i40e_up_complete(vsi);
+
+ return err;
+}
+
+/**
* i40e_down - Shutdown the connection processing
* @vsi: the VSI being stopped
**/
@@ -6805,7 +6840,8 @@ void i40e_down(struct i40e_vsi *vsi)
i40e_vsi_disable_irq(vsi);
i40e_vsi_stop_rings(vsi);
if (vsi->type == I40E_VSI_MAIN &&
- vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)
+ (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
+ vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
i40e_force_link_state(vsi->back, false);
i40e_napi_disable_all(vsi);
@@ -8950,13 +8986,6 @@ u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}
-/* We can see up to 256 filter programming desc in transit if the filters are
- * being applied really fast; before we see the first
- * filter miss error on Rx queue 0. Accumulating enough error messages before
- * reacting will make sure we don't cause flush too often.
- */
-#define I40E_MAX_FD_PROGRAM_ERROR 256
-
/**
* i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
* @pf: board private structure
@@ -9851,11 +9880,11 @@ static void i40e_send_version(struct i40e_pf *pf)
{
struct i40e_driver_version dv;
- dv.major_version = DRV_VERSION_MAJOR;
- dv.minor_version = DRV_VERSION_MINOR;
- dv.build_version = DRV_VERSION_BUILD;
+ dv.major_version = 0xff;
+ dv.minor_version = 0xff;
+ dv.build_version = 0xff;
dv.subbuild_version = 0;
- strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
+ strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}
@@ -10824,10 +10853,10 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
if (vsi->tx_rings && vsi->tx_rings[0]) {
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
kfree_rcu(vsi->tx_rings[i], rcu);
- vsi->tx_rings[i] = NULL;
- vsi->rx_rings[i] = NULL;
+ WRITE_ONCE(vsi->tx_rings[i], NULL);
+ WRITE_ONCE(vsi->rx_rings[i], NULL);
if (vsi->xdp_rings)
- vsi->xdp_rings[i] = NULL;
+ WRITE_ONCE(vsi->xdp_rings[i], NULL);
}
}
}
@@ -10861,7 +10890,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
ring->itr_setting = pf->tx_itr_default;
- vsi->tx_rings[i] = ring++;
+ WRITE_ONCE(vsi->tx_rings[i], ring++);
if (!i40e_enabled_xdp_vsi(vsi))
goto setup_rx;
@@ -10879,7 +10908,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
set_ring_xdp(ring);
ring->itr_setting = pf->tx_itr_default;
- vsi->xdp_rings[i] = ring++;
+ WRITE_ONCE(vsi->xdp_rings[i], ring++);
setup_rx:
ring->queue_index = i;
@@ -10892,7 +10921,7 @@ setup_rx:
ring->size = 0;
ring->dcb_tc = 0;
ring->itr_setting = pf->rx_itr_default;
- vsi->rx_rings[i] = ring;
+ WRITE_ONCE(vsi->rx_rings[i], ring);
}
return 0;
@@ -11846,6 +11875,58 @@ bw_commit_out:
}
/**
+ * i40e_is_total_port_shutdown_enabled - read NVM and return whether
+ * the total port shutdown feature is enabled for this PF
+ * @pf: board private structure
+ **/
+static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
+{
+#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4)
+#define I40E_FEATURES_ENABLE_PTR 0x2A
+#define I40E_CURRENT_SETTING_PTR 0x2B
+#define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D
+#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
+#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
+#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
+ i40e_status read_status = I40E_SUCCESS;
+ u16 sr_emp_sr_settings_ptr = 0;
+ u16 features_enable = 0;
+ u16 link_behavior = 0;
+ bool ret = false;
+
+ read_status = i40e_read_nvm_word(&pf->hw,
+ I40E_SR_EMP_SR_SETTINGS_PTR,
+ &sr_emp_sr_settings_ptr);
+ if (read_status)
+ goto err_nvm;
+ read_status = i40e_read_nvm_word(&pf->hw,
+ sr_emp_sr_settings_ptr +
+ I40E_FEATURES_ENABLE_PTR,
+ &features_enable);
+ if (read_status)
+ goto err_nvm;
+ if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
+ read_status = i40e_read_nvm_module_data(&pf->hw,
+ I40E_SR_EMP_SR_SETTINGS_PTR,
+ I40E_CURRENT_SETTING_PTR,
+ I40E_LINK_BEHAVIOR_WORD_OFFSET,
+ I40E_LINK_BEHAVIOR_WORD_LENGTH,
+ &link_behavior);
+ if (read_status)
+ goto err_nvm;
+ link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
+ ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
+ }
+ return ret;
+
+err_nvm:
+ dev_warn(&pf->pdev->dev,
+ "total-port-shutdown feature is off due to read nvm error: %s\n",
+ i40e_stat_str(&pf->hw, read_status));
+ return ret;
+}
+
+/**
* i40e_sw_init - Initialize general software structures (struct i40e_pf)
* @pf: board private structure to initialize
*
@@ -12020,6 +12101,16 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->tx_timeout_recovery_level = 1;
+ if (pf->hw.mac.type != I40E_MAC_X722 &&
+ i40e_is_total_port_shutdown_enabled(pf)) {
+ /* Link down on close must be on when total port shutdown
+ * is enabled for a given port
+ */
+ pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
+ I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
+ dev_info(&pf->pdev->dev,
+ "total-port-shutdown was enabled, link-down-on-close is forced on\n");
+ }
mutex_init(&pf->switch_mutex);
sw_init_done:
@@ -12832,9 +12923,6 @@ static int i40e_xdp(struct net_device *dev,
switch (xdp->command) {
case XDP_SETUP_PROG:
return i40e_xdp_setup(vsi, xdp->prog);
- case XDP_QUERY_PROG:
- xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
- return 0;
case XDP_SETUP_XSK_UMEM:
return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
xdp->xsk.queue_id);
@@ -13694,8 +13782,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
/* Setup DCB netlink interface */
i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
- /* fall through */
-
+ fallthrough;
case I40E_VSI_FDIR:
/* set up vectors and rings if needed */
ret = i40e_vsi_setup_vectors(vsi);
@@ -13711,7 +13798,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
i40e_vsi_reset_stats(vsi);
break;
-
default:
/* no netdev or rings for the other VSI types */
break;
@@ -14565,28 +14651,17 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
**/
static bool i40e_check_recovery_mode(struct i40e_pf *pf)
{
- u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
- bool is_recovery_mode = false;
-
- if (pf->hw.mac.type == I40E_MAC_XL710)
- is_recovery_mode =
- val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
- val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
- val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK ||
- val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK;
- if (pf->hw.mac.type == I40E_MAC_X722)
- is_recovery_mode =
- val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
- val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK;
- if (is_recovery_mode) {
- dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
- dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+ u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
+
+ if (val & I40E_GL_FWSTS_FWS1B_MASK) {
+ dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
+ dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
set_bit(__I40E_RECOVERY_MODE, pf->state);
return true;
}
- if (test_and_clear_bit(__I40E_RECOVERY_MODE, pf->state))
- dev_info(&pf->pdev->dev, "Reinitializing in normal mode with full functionality.\n");
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state))
+ dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");
return false;
}
@@ -14614,29 +14689,68 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf)
**/
static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
{
- const unsigned short MAX_CNT = 1000;
- const unsigned short MSECS = 10;
+ /* wait max 10 seconds for PF reset to succeed */
+ const unsigned long time_end = jiffies + 10 * HZ;
+
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
- int cnt;
- for (cnt = 0; cnt < MAX_CNT; ++cnt) {
+ ret = i40e_pf_reset(hw);
+ while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
+ usleep_range(10000, 20000);
ret = i40e_pf_reset(hw);
- if (!ret)
- break;
- msleep(MSECS);
}
- if (cnt == MAX_CNT) {
+ if (ret == I40E_SUCCESS)
+ pf->pfr_count++;
+ else
dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
- return ret;
- }
- pf->pfr_count++;
return ret;
}
/**
+ * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
+ * @pf: board private structure
+ *
+ * Check FW registers to determine if FW issued unexpected EMP Reset.
+ * Every time an unexpected EMP Reset occurs, the FW increments
+ * a counter of unexpected EMP Resets. When the counter reaches 10,
+ * the FW should enter Recovery mode.
+ *
+ * Returns true if FW issued unexpected EMP Reset
+ **/
+static bool i40e_check_fw_empr(struct i40e_pf *pf)
+{
+ const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
+ I40E_GL_FWSTS_FWS1B_MASK;
+ return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
+ (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
+}
+
+/**
+ * i40e_handle_resets - handle EMP resets and PF resets
+ * @pf: board private structure
+ *
+ * Handle both EMP resets and PF resets and conclude whether there are
+ * any issues regarding these resets. If there are any issues, then
+ * generate a log entry.
+ *
+ * Return 0 if the NIC is healthy, or a negative value when there are
+ * issues with the resets.
+ **/
+static i40e_status i40e_handle_resets(struct i40e_pf *pf)
+{
+ const i40e_status pfr = i40e_pf_loop_reset(pf);
+ const bool is_empr = i40e_check_fw_empr(pf);
+
+ if (is_empr || pfr != I40E_SUCCESS)
+ dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
+
+ return is_empr ? I40E_ERR_RESET_FAILED : pfr;
+}
+
+/**
* i40e_init_recovery_mode - initialize subsystems needed in recovery mode
* @pf: board private structure
* @hw: ptr to the hardware info
@@ -14872,11 +14986,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
- err = i40e_pf_loop_reset(pf);
- if (err) {
- dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
+ err = i40e_handle_resets(pf);
+ if (err)
goto err_pf_reset;
- }
i40e_check_recovery_mode(pf);
@@ -15272,6 +15384,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_stat_str(&pf->hw, err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* make sure the MFS hasn't been set lower than the default */
+#define MAX_FRAME_SIZE_DEFAULT 0x2600
+ val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
+ I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
+ if (val < MAX_FRAME_SIZE_DEFAULT)
+ dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
+ i, val);
+
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
* PAUSE or PFC frames and potentially controlling traffic for other
@@ -15782,8 +15902,7 @@ static struct pci_driver i40e_driver = {
**/
static int __init i40e_init_module(void)
{
- pr_info("%s: %s - version %s\n", i40e_driver_name,
- i40e_driver_string, i40e_driver_version_str);
+ pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
/* There is no need to throttle the number of active tasks because
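Regarding i40e_is_total_port_shutdown_enabled() above: the link-behavior NVM word packs one 4-bit field per port, and bit 0 of a port's field means the OS is allowed to force the link state. The following self-contained sketch covers just that bit arithmetic, with hypothetical macro and function names (only the 4-bit-per-port layout and the bit-0 meaning are taken from the hunk above):

#include <stdbool.h>
#include <stdint.h>

#define LINK_BEHAVIOR_PORT_BIT_LENGTH    4          /* bits per port in the word */
#define LINK_BEHAVIOR_OS_FORCED_ENABLED  (1u << 0)  /* bit 0 of the port's field */

/* Hypothetical helper: is OS-forced link behavior enabled for this port? */
static bool os_forced_link_enabled(uint16_t link_behavior_word, unsigned int port)
{
        uint16_t field = link_behavior_word >>
                         (port * LINK_BEHAVIOR_PORT_BIT_LENGTH);

        return field & LINK_BEHAVIOR_OS_FORCED_ENABLED;
}

For example, port 2 with a word of 0x0100 yields a field of 0x1, so total port shutdown, and with it link-down-on-close, would be forced on for that port, matching the dev_info() added to i40e_sw_init() above.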
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index c302ef2524f8..2f6815b2f8df 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -26,7 +26,6 @@ do { \
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
#define rd32(a, reg) readl((a)->hw_addr + (reg))
-#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
#define rd64(a, reg) readq((a)->hw_addr + (reg))
#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 9bf1ad4319f5..ff7b19c6bc73 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -586,7 +586,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE))
return -ERANGE;
- /* fall through */
+ fallthrough;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
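Looking ahead to the i40e_register.h hunk below: the new I40E_GL_FWSTS_FWS1B_EMPR_0 (0x20) and I40E_GL_FWSTS_FWS1B_EMPR_10 (0x2A) values, both pre-shifted into the FWS1B field, bound the counter that i40e_check_fw_empr() tests; a masked FWS1B value strictly above EMPR_0 and at most EMPR_10 corresponds to between one and ten unexpected EMP resets recorded by the firmware, at which point i40e_handle_resets() emits the "Entering recovery mode due to repeated FW resets" message and returns I40E_ERR_RESET_FAILED. In the same series, i40e_check_recovery_mode() is simplified to treat any non-zero FWS1B value as recovery mode, and i40e_pf_loop_reset() now retries for up to ten seconds using time_before(jiffies, ...) with usleep_range() between attempts instead of a fixed iteration count.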
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index d35d690ca10f..564df22f3f46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -4,53 +4,14 @@
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
-#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
-#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
-#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
-#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
-#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
-#define I40E_GL_ARQH_ARQH_SHIFT 0
-#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
-#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
-#define I40E_GL_ARQT_ARQT_SHIFT 0
-#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
-#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
-#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
-#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
-#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
-#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
-#define I40E_GL_ATQH_ATQH_SHIFT 0
-#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
-#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
-#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
-#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
-#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */
-#define I40E_GL_ATQT_ATQT_SHIFT 0
-#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
-#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
-#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
#define I40E_PF_ARQH_ARQH_SHIFT 0
#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
-#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
@@ -60,20 +21,10 @@
#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
-#define I40E_PF_ARQT_ARQT_SHIFT 0
-#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
-#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
-#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
-#define I40E_PF_ATQH_ATQH_SHIFT 0
-#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
-#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
@@ -83,786 +34,136 @@
#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
-#define I40E_PF_ATQT_ATQT_SHIFT 0
-#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
-#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQBAH_MAX_INDEX 127
-#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQBAL_MAX_INDEX 127
-#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQH_MAX_INDEX 127
-#define I40E_VF_ARQH_ARQH_SHIFT 0
-#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
-#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQLEN_MAX_INDEX 127
-#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
-#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
-#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
-#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
-#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQT_MAX_INDEX 127
-#define I40E_VF_ARQT_ARQT_SHIFT 0
-#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
-#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQBAH_MAX_INDEX 127
-#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQBAL_MAX_INDEX 127
-#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQH_MAX_INDEX 127
-#define I40E_VF_ATQH_ATQH_SHIFT 0
-#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
-#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQLEN_MAX_INDEX 127
-#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
-#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
-#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
-#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQT_MAX_INDEX 127
-#define I40E_VF_ATQT_ATQT_SHIFT 0
-#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
-#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
-#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
-#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
-#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
-#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
-#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
-#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
-#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
-#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
-#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
-#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
-#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
-#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
-#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
-#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
-#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
-#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
-#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
-#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
-#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
-#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
-#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
-#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
-#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
-#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
-#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
-#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
-#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
-#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
-#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
-#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
-#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
-#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
-#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
-#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
-#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
-#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
-#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
-#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
-#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
-#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
-#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
-#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
-#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
-#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
-#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
-#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
-#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
-#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
-#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
-#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
-#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
-#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
-#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
-#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
-#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
-#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
-#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
-#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
-#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
-#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
-#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
-#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
-#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
-#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
-#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
-#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
-#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
-#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
-#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
-#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
-#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
-#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
-#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
-#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
-#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
-#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
-#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
-#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
-#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
-#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
-#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
-#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
-#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
-#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
-#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
-#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
-#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
-#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
-#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
-#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
-#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
-#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
-#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
-#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
-#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
-#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
-#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
-#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
-#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
-#define I40E_GL_FWSTS_FWS0B_SHIFT 0
-#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
-#define I40E_GL_FWSTS_FWRI_SHIFT 9
-#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
#define I40E_GL_FWSTS_FWS1B_SHIFT 16
#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_EMPR_0 I40E_MASK(0x20, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_EMPR_10 I40E_MASK(0x2A, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT)
-#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
-#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
-#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
-#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
-#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
-#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
-#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
-#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
-#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
-#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
-#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
-#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
-#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
-#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
-#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
-#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
-#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
-#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
-#define I40E_GLGEN_I2CCMD_R_SHIFT 29
-#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
-#define I40E_GLGEN_I2CCMD_E_SHIFT 31
-#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
-#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
-#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
-#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MSCA_MAX_INDEX 3
#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
-#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
-#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
-#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
-#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
-#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MSRWD_MAX_INDEX 3
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
-#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
-#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
-#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
-#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
-#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
-#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
-#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
-#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
-#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
-#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
-#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
-#define I40E_GLGEN_STAT_VTEN_SHIFT 3
-#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
-#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
-#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
-#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
-#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
-#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
-#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
-#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
-#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
-#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
-#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
-#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
-#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
-#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
-#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
-#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
-#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
-#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
-#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
-#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
-#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
-#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
-#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
-#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
-#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
-#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
-#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
-#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
-#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
-#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
-#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
-#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
-#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
-#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
-#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
-#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
-#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
-#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
-#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
-#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
-#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
-#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
-#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
-#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
-#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_SDPART_MAX_INDEX 15
-#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
-#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
-#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
-#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
-#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
-#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
-#define I40E_GL_GP_FUSE_MAX_INDEX 28
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
-#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
-#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
-#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
-#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
-#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
-#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
-#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
-#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_PFINT_CEQCTL_MAX_INDEX 511
#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */
-#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0
-#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT)
#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1
#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT)
-#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2
-#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT)
#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
@@ -872,8 +173,6 @@
#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
@@ -881,7 +180,6 @@
#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
@@ -891,93 +189,13 @@
#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
-#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
-#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
-#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
-#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
@@ -986,14 +204,8 @@
#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
-#define I40E_PFINT_ICR0_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
@@ -1017,10 +229,6 @@
#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
@@ -1029,43 +237,17 @@
#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
-#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
-#define I40E_PFINT_ITR0_MAX_INDEX 2
-#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_ITRN_MAX_INDEX 2
-#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
-#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
-#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_RATEN_MAX_INDEX 511
-#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
-#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
-#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QINT_RQCTL_MAX_INDEX 1535
#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
@@ -1075,13 +257,11 @@
#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QINT_TQCTL_MAX_INDEX 1535
#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
@@ -1091,160 +271,45 @@
#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
-#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
-#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_ICR0_MAX_INDEX 127
-#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
-#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR0_SWINT_SHIFT 31
-#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
-#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
-#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
-#define I40E_VFINT_ITR0_MAX_INDEX 2
-#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
-#define I40E_VFINT_ITRN_MAX_INDEX 2
-#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
-#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPINT_AEQCTL_MAX_INDEX 127
#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_VPINT_CEQCTL_MAX_INDEX 511
#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPINT_LNKLST0_MAX_INDEX 127
#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
-#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPINT_RATE0_MAX_INDEX 127
-#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
-#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VPINT_RATEN_MAX_INDEX 511
-#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
-#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */
-#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
-#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
-#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
-#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
-#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
-#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
-#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
-#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11
#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16
-#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
@@ -1257,19 +322,12 @@
#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QRX_ENA_MAX_INDEX 1535
#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
-#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QRX_TAIL_MAX_INDEX 1535
-#define I40E_QRX_TAIL_TAIL_SHIFT 0
-#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QTX_CTL_MAX_INDEX 1535
#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
#define I40E_QTX_CTL_PF_INDX_SHIFT 2
@@ -1277,43 +335,22 @@
#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QTX_ENA_MAX_INDEX 1535
#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
-#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QTX_HEAD_MAX_INDEX 1535
-#define I40E_QTX_HEAD_HEAD_SHIFT 0
-#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
-#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
-#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QTX_TAIL_MAX_INDEX 1535
-#define I40E_QTX_TAIL_TAIL_SHIFT 0
-#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPLAN_MAPENA_MAX_INDEX 127
#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
-#define I40E_VPLAN_QTABLE_MAX_INDEX 15
#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
-#define I40E_VSILAN_QBASE_MAX_INDEX 383
-#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
-#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
-#define I40E_VSILAN_QTABLE_MAX_INDEX 7
-#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
-#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
-#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
-#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
@@ -1322,789 +359,47 @@
#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
-#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */
-#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
-#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
-#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */
-#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
-#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
-#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
-#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
-#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
-#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
-#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
-#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
-#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
-#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
-#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
-#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
-#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
-#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */
-#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
-#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
-#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
-#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
-#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
-#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
-#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
-#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
-#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */
-#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
-#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
-#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
-#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
-#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
-#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
-#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
-#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
-#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
-#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
-#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
-#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
-#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
-#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
-#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
-#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
-#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
-#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
-#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
-#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
-#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
-#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
-#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
-#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
-#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
-#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
-#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
-#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
-#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
-#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_METF_MAX_INDEX 3
-#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
-#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
-#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
-#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
-#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
-#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
-#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
-#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
-#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
-#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
-#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
-#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
-#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
-#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
-#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
-#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
-#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
-#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
-#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
-#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
-#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
-#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
-#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
-#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */
-#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
-#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
-#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
-#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
-#define I40E_MSIX_PBA_MAX_INDEX 5
-#define I40E_MSIX_PBA_PENBIT_SHIFT 0
-#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
-#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TADD_MAX_INDEX 128
-#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
-#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TMSG_MAX_INDEX 128
-#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TUADD_MAX_INDEX 128
-#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TVCTRL_MAX_INDEX 128
-#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
-#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
-#define I40E_VFMSIX_PBA1_MAX_INDEX 19
-#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TADD1_MAX_INDEX 639
-#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
-#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
-#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
-#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
-#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
-#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
-#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
-#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
-#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
-#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
-#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
-#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
-#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
-#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
-#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
-#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
-#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
-#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
-#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
-#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
-#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
-#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
-#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
-#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
-#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
-#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
-#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
-#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
-#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
-#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
-#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
-#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
-#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
-#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
-#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
-#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
-#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
-#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
-#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
-#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
-#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
-#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
-#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
#define I40E_GLNVM_SRCTL_START_SHIFT 30
-#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
-#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
-#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
-#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
-#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
-#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
-#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
-#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
-#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
-#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
-#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
-#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
-#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
-#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
-#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
-#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
-#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
-#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
-#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
-#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
-#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
-#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
-#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
-#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
-#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
-#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
-#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
-#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
-#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
-#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
-#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
-#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
-#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
-#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
-#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
-#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
-#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
-#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
-#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
-#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
-#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
-#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
-#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
-#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
-#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
-#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
-#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
-#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
-#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
-#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
-#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
-#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
-#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
-#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
-#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
-#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
-#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
-#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
-#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
-#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
-#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
-#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
-#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
-#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
-#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
-#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
-#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
-#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
-#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
-#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
-#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
-#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
-#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
-#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
-#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
-#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
-#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
-#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
-#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
-#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
-#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
-#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
-#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
-#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
-#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
-#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
-#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
-#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
-#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
-#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
-#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
-#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
-#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
-#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
-#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
-#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
-#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
-#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
-#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
-#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
-#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
-#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
-#define I40E_PFPCI_PM_PME_EN_SHIFT 0
-#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
-#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
-#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
-#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
-#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
-#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
-#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
-#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
-#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */
-#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
-#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
-#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
-#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
-#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
-#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
-#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
-#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
-#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
-#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
-#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
-#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
-#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
-#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
-#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
-#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
-#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
-#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
-#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
-#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
-#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
-#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
-#define I40E_PRTPM_GC_RATD_SHIFT 2
-#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
-#define I40E_PRTPM_GC_LCDMP_SHIFT 3
-#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
-#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
-#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
-#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
-#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
-#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
-#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
-#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GL_PRS_FVBM_MAX_INDEX 3
-#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT 0
-#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_MASK I40E_MASK(0x7F, I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT)
-#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT 8
-#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_MASK I40E_MASK(0x3F, I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT)
-#define I40E_GL_PRS_FVBM_MSK_ENA_SHIFT 31
-#define I40E_GL_PRS_FVBM_MSK_ENA_MASK I40E_MASK(0x1, I40E_GL_PRS_FVBM_MSK_ENA_SHIFT)
-#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
-#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
-#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
-#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
-#define I40E_GLRPB_GHW_GHW_SHIFT 0
-#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
-#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
-#define I40E_GLRPB_GLW_GLW_SHIFT 0
-#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
-#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
-#define I40E_GLRPB_PHW_PHW_SHIFT 0
-#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
-#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
-#define I40E_GLRPB_PLW_PLW_SHIFT 0
-#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
-#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DHW_MAX_INDEX 7
-#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
-#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
-#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DLW_MAX_INDEX 7
-#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
-#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
-#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DPS_MAX_INDEX 7
-#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
-#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
-#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_SHT_MAX_INDEX 7
-#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
-#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
-#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
-#define I40E_PRTRPB_SHW_SHW_SHIFT 0
-#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
-#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_SLT_MAX_INDEX 7
-#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
-#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
-#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
-#define I40E_PRTRPB_SLW_SLW_SHIFT 0
-#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
-#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
-#define I40E_PRTRPB_SPS_SPS_SHIFT 0
-#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
-#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
-#define I40E_GLQF_CTL_HTOEP_SHIFT 1
-#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
-#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
-#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
-#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
-#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
-#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
-#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
-#define I40E_GLQF_CTL_RSVD_SHIFT 7
-#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
-#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
-#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
-#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
-#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
-#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
-#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
-#define I40E_GLQF_CTL_FDBEST_SHIFT 17
-#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
-#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
-#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
-#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
-#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
-#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
-#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
@@ -2112,36 +407,7 @@
#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
#define I40E_GLQF_HKEY_MAX_INDEX 12
-#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
-#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
-#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
-#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
-#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
-#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
-#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
-#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
-#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_HSYM_MAX_INDEX 63
-#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
-#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_GLQF_PCNT_MAX_INDEX 511
-#define I40E_GLQF_PCNT_PCNT_SHIFT 0
-#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
-#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_SWAP_MAX_INDEX 1
-#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
-#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
-#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
-#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
-#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
-#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
-#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
-#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
-#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
-#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
-#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
-#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
@@ -2159,54 +425,19 @@
#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
-#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
-#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
-#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
-#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
-#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
-#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
-#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_PFQF_HENA_MAX_INDEX 1
-#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
#define I40E_PFQF_HKEY_MAX_INDEX 12
-#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
-#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
-#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
-#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_PFQF_HLUT_MAX_INDEX 127
-#define I40E_PFQF_HLUT_LUT0_SHIFT 0
-#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
-#define I40E_PFQF_HLUT_LUT1_SHIFT 8
-#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
-#define I40E_PFQF_HLUT_LUT2_SHIFT 16
-#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
-#define I40E_PFQF_HLUT_LUT3_SHIFT 24
-#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
-#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
-#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
-#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
-#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
-#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
-#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
#define I40E_PRTQF_FD_INSET_MAX_INDEX 63
#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0
@@ -2215,14 +446,7 @@
#define I40E_PRTQF_FD_INSET_MAX_INDEX 63
#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0
#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT)
-#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
-#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
-#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
-#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
-#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
-#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
-#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
@@ -2230,775 +454,148 @@
#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HENA1_MAX_INDEX 1
-#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
#define I40E_VFQF_HKEY1_MAX_INDEX 12
-#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
#define I40E_VFQF_HLUT1_MAX_INDEX 15
-#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
-#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
-#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
-#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
-#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HREGION1_MAX_INDEX 7
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
-#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPQF_CTL_MAX_INDEX 127
-#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
-#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
-#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
-#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
-#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
-#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
-#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
-#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
-#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
-#define I40E_VSIQF_CTL_MAX_INDEX 383
-#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
-#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
-#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
-#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
-#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
-#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
-#define I40E_VSIQF_TCREGION_MAX_INDEX 3
-#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
-#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
-#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
-#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
-#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOECRC_MAX_INDEX 143
-#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
-#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
-#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDDPC_MAX_INDEX 143
-#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
-#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
-#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
-#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
-#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
-#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
-#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
-#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
-#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
-#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOELAST_MAX_INDEX 143
-#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
-#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
-#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEPRC_MAX_INDEX 143
-#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
-#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
-#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEPTC_MAX_INDEX 143
-#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
-#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
-#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOERPDC_MAX_INDEX 143
-#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
-#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
-#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR1_L_MAX_INDEX 143
-#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
-#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
-#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR2_L_MAX_INDEX 143
-#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
-#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPRCH_MAX_INDEX 3
-#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPRCL_MAX_INDEX 3
-#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPTCH_MAX_INDEX 3
-#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPTCL_MAX_INDEX 3
-#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
-#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
-#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GORCH_MAX_INDEX 3
-#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
-#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GORCL_MAX_INDEX 3
-#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
-#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GOTCH_MAX_INDEX 3
-#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GOTCL_MAX_INDEX 3
-#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
-#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
-#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
-#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LDPC_MAX_INDEX 3
-#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
-#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
-#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
-#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MLFC_MAX_INDEX 3
-#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
-#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPRCH_MAX_INDEX 3
-#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPRCL_MAX_INDEX 3
-#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPTCH_MAX_INDEX 3
-#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPTCL_MAX_INDEX 3
-#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MRFC_MAX_INDEX 3
-#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
-#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
-#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
-#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
-#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
-#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC127H_MAX_INDEX 3
-#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
-#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC127L_MAX_INDEX 3
-#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
-#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
-#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
-#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC255H_MAX_INDEX 3
-#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
-#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC255L_MAX_INDEX 3
-#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
-#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC511H_MAX_INDEX 3
-#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
-#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC511L_MAX_INDEX 3
-#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
-#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC64H_MAX_INDEX 3
-#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
-#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC64L_MAX_INDEX 3
-#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
-#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
-#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
-#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
-#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
-#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
-#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
-#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC127H_MAX_INDEX 3
-#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
-#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC127L_MAX_INDEX 3
-#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
-#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
-#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
-#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
-#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
-#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC255H_MAX_INDEX 3
-#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
-#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC255L_MAX_INDEX 3
-#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
-#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC511H_MAX_INDEX 3
-#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
-#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC511L_MAX_INDEX 3
-#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
-#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC64H_MAX_INDEX 3
-#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
-#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC64L_MAX_INDEX 3
-#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
-#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
-#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
-#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
-#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
-#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RDPC_MAX_INDEX 3
-#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
-#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RFC_MAX_INDEX 3
-#define I40E_GLPRT_RFC_RFC_SHIFT 0
-#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RJC_MAX_INDEX 3
-#define I40E_GLPRT_RJC_RJC_SHIFT 0
-#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RLEC_MAX_INDEX 3
-#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
-#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_ROC_MAX_INDEX 3
-#define I40E_GLPRT_ROC_ROC_SHIFT 0
-#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RUC_MAX_INDEX 3
-#define I40E_GLPRT_RUC_RUC_SHIFT 0
-#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
-#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RUPP_MAX_INDEX 3
-#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
-#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_TDOLD_MAX_INDEX 3
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPRCH_MAX_INDEX 3
-#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPRCL_MAX_INDEX 3
-#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPTCH_MAX_INDEX 3
-#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPTCL_MAX_INDEX 3
-#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPRCH_MAX_INDEX 15
-#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPRCL_MAX_INDEX 15
-#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPTCH_MAX_INDEX 15
-#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPTCL_MAX_INDEX 15
-#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GORCH_MAX_INDEX 15
-#define I40E_GLSW_GORCH_GORCH_SHIFT 0
-#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GORCL_MAX_INDEX 15
-#define I40E_GLSW_GORCL_GORCL_SHIFT 0
-#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GOTCH_MAX_INDEX 15
-#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GOTCL_MAX_INDEX 15
-#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPRCH_MAX_INDEX 15
-#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPRCL_MAX_INDEX 15
-#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPTCH_MAX_INDEX 15
-#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPTCL_MAX_INDEX 15
-#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_RUPP_MAX_INDEX 15
-#define I40E_GLSW_RUPP_RUPP_SHIFT 0
-#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_TDPC_MAX_INDEX 15
-#define I40E_GLSW_TDPC_TDPC_SHIFT 0
-#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPRCH_MAX_INDEX 15
-#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPRCL_MAX_INDEX 15
-#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPTCH_MAX_INDEX 15
-#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPTCL_MAX_INDEX 15
-#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPRCH_MAX_INDEX 383
-#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPRCL_MAX_INDEX 383
-#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPTCH_MAX_INDEX 383
-#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPTCL_MAX_INDEX 383
-#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GORCH_MAX_INDEX 383
-#define I40E_GLV_GORCH_GORCH_SHIFT 0
-#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GORCL_MAX_INDEX 383
-#define I40E_GLV_GORCL_GORCL_SHIFT 0
-#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GOTCH_MAX_INDEX 383
-#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GOTCL_MAX_INDEX 383
-#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPRCH_MAX_INDEX 383
-#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPRCL_MAX_INDEX 383
-#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPTCH_MAX_INDEX 383
-#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPTCL_MAX_INDEX 383
-#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_RDPC_MAX_INDEX 383
-#define I40E_GLV_RDPC_RDPC_SHIFT 0
-#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_RUPP_MAX_INDEX 383
-#define I40E_GLV_RUPP_RUPP_SHIFT 0
-#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_TEPC_MAX_INDEX 383
-#define I40E_GLV_TEPC_TEPC_SHIFT 0
-#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPRCH_MAX_INDEX 383
-#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPRCL_MAX_INDEX 383
-#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPTCH_MAX_INDEX 383
-#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
-#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPTCL_MAX_INDEX 383
-#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
-#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
-#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
-#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
-#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
-#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
-#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
-#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
-#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
-#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
-#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
-#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
-#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
-#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
-#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
-#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
-#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
-#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
-#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
-#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
-#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
-#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
-#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
-#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
-#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
-#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
-#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
-#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
-#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
-#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
-#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
-#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
-#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
-#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
-#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
-#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
-#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
-#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
-#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
-#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
-#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
-#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
-#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
-#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
-#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
-#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
-#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
-#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
-#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
-#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
-#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
-#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
-#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
-#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
-#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
-#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
-#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
-#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
-#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
-#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
-#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
-#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
-#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
-#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
-#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
-#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
-#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
-#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
-#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
-#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
@@ -3033,2304 +630,53 @@
#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VP_MDET_RX_MAX_INDEX 127
#define I40E_VP_MDET_RX_VALID_SHIFT 0
#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VP_MDET_TX_MAX_INDEX 127
#define I40E_VP_MDET_TX_VALID_SHIFT 0
#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
-#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
-#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
-#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
-#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
-#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
-#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
-#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
-#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
-#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
-#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
-#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
#define I40E_PFPM_APM_APME_SHIFT 0
#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
-#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
-#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
-#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
-#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
-#define I40E_PFPM_WUFC_LNKC_SHIFT 0
-#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
#define I40E_PFPM_WUFC_MAG_SHIFT 1
#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
-#define I40E_PFPM_WUFC_MNG_SHIFT 3
-#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
-#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
-#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
-#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
-#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
-#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
-#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
-#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
-#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
-#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX0_SHIFT 16
-#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
-#define I40E_PFPM_WUFC_FLX1_SHIFT 17
-#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
-#define I40E_PFPM_WUFC_FLX2_SHIFT 18
-#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
-#define I40E_PFPM_WUFC_FLX3_SHIFT 19
-#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
-#define I40E_PFPM_WUFC_FLX4_SHIFT 20
-#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
-#define I40E_PFPM_WUFC_FLX5_SHIFT 21
-#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
-#define I40E_PFPM_WUFC_FLX6_SHIFT 22
-#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
-#define I40E_PFPM_WUFC_FLX7_SHIFT 23
-#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
-#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
-#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
-#define I40E_PFPM_WUS_LNKC_SHIFT 0
-#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
-#define I40E_PFPM_WUS_MAG_SHIFT 1
-#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
-#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
-#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
-#define I40E_PFPM_WUS_MNG_SHIFT 3
-#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
-#define I40E_PFPM_WUS_FLX0_SHIFT 16
-#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
-#define I40E_PFPM_WUS_FLX1_SHIFT 17
-#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
-#define I40E_PFPM_WUS_FLX2_SHIFT 18
-#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
-#define I40E_PFPM_WUS_FLX3_SHIFT 19
-#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
-#define I40E_PFPM_WUS_FLX4_SHIFT 20
-#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
-#define I40E_PFPM_WUS_FLX5_SHIFT 21
-#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
-#define I40E_PFPM_WUS_FLX6_SHIFT 22
-#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
-#define I40E_PFPM_WUS_FLX7_SHIFT 23
-#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
-#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
-#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
-#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
-#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
-#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
-#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
-#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
-#define I40E_PRTPM_SAH_MAX_INDEX 3
-#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
-#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
-#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
-#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
-#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
-#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
-#define I40E_PRTPM_SAH_AV_SHIFT 31
-#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
-#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
-#define I40E_PRTPM_SAL_MAX_INDEX 3
-#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
-#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
-#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
-#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
-#define I40E_VF_ARQH1_ARQH_SHIFT 0
-#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
-#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
-#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
-#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
-#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
-#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
-#define I40E_VF_ARQT1_ARQT_SHIFT 0
-#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
-#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
-#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
-#define I40E_VF_ATQH1_ATQH_SHIFT 0
-#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
-#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
-#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
-#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
-#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
-#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
-#define I40E_VF_ATQT1_ATQT_SHIFT 0
-#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
-#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
-#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
-#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
-#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
-#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
-#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
-#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
-#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
-#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
-#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR01_SWINT_SHIFT 31
-#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
-#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
-#define I40E_VFINT_ITR01_MAX_INDEX 2
-#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
-#define I40E_VFINT_ITRN1_MAX_INDEX 2
-#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
-#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
-#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_QRX_TAIL1_MAX_INDEX 15
-#define I40E_QRX_TAIL1_TAIL_SHIFT 0
-#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
-#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
-#define I40E_QTX_TAIL1_MAX_INDEX 15
-#define I40E_QTX_TAIL1_TAIL_SHIFT 0
-#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
-#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */
-#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TADD_MAX_INDEX 16
-#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
-#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TMSG_MAX_INDEX 16
-#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TUADD_MAX_INDEX 16
-#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
-#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
-#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
-#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
-#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_VFQF_HENA_MAX_INDEX 1
-#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
-#define I40E_VFQF_HKEY_MAX_INDEX 12
-#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
-#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
-#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
-#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
#define I40E_VFQF_HLUT_MAX_INDEX 15
-#define I40E_VFQF_HLUT_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
-#define I40E_VFQF_HLUT_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
-#define I40E_VFQF_HLUT_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
-#define I40E_VFQF_HLUT_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
-#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_VFQF_HREGION_MAX_INDEX 7
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
-#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
-#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
-#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
-#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
-#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
-#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
-#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
-#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
-#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
-#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
-#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */
-#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2
-#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
-#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
-#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
-#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4
-#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
-#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8
-#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
-#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */
-#define I40E_MNGSB_FDS_START_BC_SHIFT 0
-#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
-#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16
-#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
-#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127
-#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
-#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
-#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127
-#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
-#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
-#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */
-#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
-#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
-#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12
-#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
-#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16
-#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
-#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */
-#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
-#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
-#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12
-#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
-#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16
-#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
-#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */
-#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */
-#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
-#define I40E_GL_FWSTS_FWROWD_SHIFT 8
-#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
-#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */
-#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
-#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
-#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8
-#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
-#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20
-#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
-#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
-#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
-#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
-#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_CEQPART_MAX_INDEX 15
-#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
-#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
-#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
-#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
-#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */
-#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
-#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
-#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
-#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
-#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
-#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
-#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
-#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */
-#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
-#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
-#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
-#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
-#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
-#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
-#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
-#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
-#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
-#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
-#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
-#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */
-#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
-#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
-#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */
-#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
-#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
-#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
-#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
-#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
-#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
-#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
-#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */
-#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
-#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
-#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
-#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
-#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
-#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
-#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */
-#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
-#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */
-#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
-#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
-#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
-#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
-#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
-#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
-#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */
-#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
-#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
-#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */
-#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
-#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
-#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
-#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
-#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
-#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
-#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */
-#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
-#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
-#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */
-#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
-#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
-#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
-#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
-#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
-#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
-#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
-#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
-#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
-#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */
-#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
-#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
-#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */
-#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
-#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
-#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
-#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
-#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
-#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
-#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */
-#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
-#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
-#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
-#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
-#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
-#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
-#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
-#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */
-#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
-#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
-#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */
-#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
-#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
-#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
-#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
-#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
-#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
-#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
-#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
-#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */
-#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
-#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
-#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */
-#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
-#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
-#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
-#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
-#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
-#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
-#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
-#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
-#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
-#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */
-#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
-#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
-#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */
-#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
-#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15
-#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
-#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
-#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
-#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
-#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
-#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
-#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
-#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
-#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
-#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
-#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
-#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
-#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
-#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
-#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
-#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
-#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
-#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
-#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
-#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
-#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
-#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
-#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
-#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
-#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
-#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
-#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
-#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
-#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
-#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
-#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
-#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
-#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
-#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
-#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
-#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
-#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
-#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
-#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
-#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
-#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
-#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
-#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
-#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
-#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
-#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
-#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
-#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
-#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
-#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
-#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
-#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
-#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
-#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
-#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
-#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
-#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
-#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
-#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
-#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
-#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
-#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
-#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
-#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
-#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
-#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
-#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
-#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
-#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
-#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
-#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
-#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */
-#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
-#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
-#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8
-#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
-#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20
-#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
-#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */
-#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
-#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
-#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8
-#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
-#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20
-#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
-#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */
-#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
-#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
-#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8
-#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
-#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20
-#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
-#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
-#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
-#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
-#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
-#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */
-#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
-#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
-#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */
-#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
-#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
-#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */
-#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
-#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */
-#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0
-#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2
-#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4
-#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9
-#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10
-#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11
-#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12
-#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13
-#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14
-#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
-#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */
-#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
-#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
-#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */
-#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0
-#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
-#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2
-#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
-#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
-#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
-#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
-#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
-#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
-#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
-#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPLAN_QBASE_MAX_INDEX 127
-#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0
-#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
-#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11
-#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
-#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
-#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
-#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */
-#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
-#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
-#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */
-#define I40E_GLNVM_AL_REQ_POR_SHIFT 0
-#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
-#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1
-#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
-#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2
-#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
-#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3
-#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
-#define I40E_GLNVM_AL_REQ_PE_SHIFT 4
-#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
-#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
-#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
-#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
-#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
-#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
-#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
-#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
-#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
-#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
-#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
-#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
-#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
-#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
-#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
-#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
-#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
-#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
-#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
-#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
-#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
-#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
-#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
-#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
-#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
-#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
-#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
-#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
-#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
-#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
-#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
-#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
-#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
-#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
-#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
-#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
-#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
-#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
-#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
-#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
-#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
-#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
-#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
-#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
-#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
-#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
-#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
-#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
-#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
-#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
-#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
-#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
-#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
-#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
-#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
-#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
-#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
-#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
-#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
-#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
-#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
-#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
-#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
-#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
-#define I40E_MNGSB_DADD_ADDR_SHIFT 0
-#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
-#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
-#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
-#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
-#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
-#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
-#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
-#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
-#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
-#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
-#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
-#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
-#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
-#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
-#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
-#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
-#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
-#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
-#define I40E_MNGSB_RDATA_DATA_SHIFT 0
-#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
-#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
-#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
-#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
-#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
-#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
-#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
-#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
-#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
-#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
-#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
-#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
-#define I40E_MNGSB_RHDR0_EH_SHIFT 31
-#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
-#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
-#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
-#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
-#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
-#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
-#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
-#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
-#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
-#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
-#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
-#define I40E_MNGSB_WDATA_DATA_SHIFT 0
-#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
-#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
-#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
-#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
-#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
-#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
-#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
-#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
-#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
-#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
-#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
-#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
-#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
-#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
-#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
-#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
-#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
-#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
-#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
-#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
-#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
-#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
-#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
-#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
-#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
-#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
-#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
-#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
-#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
-#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
-#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
-#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
-#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
-#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
-#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
-#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
-#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
-#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
-#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
-#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
-#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
-#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
-#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
-#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
-#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
-#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
-#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
-#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
-#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
-#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
-#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
-#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
-#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
-#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
-#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
-#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
-#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
-#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
-#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
-#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
-#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
-#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
-#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
-#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
-#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
-#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
-#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
-#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
-#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
-#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
-#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
-#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
-#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
-#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
-#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
-#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
-#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
-#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
-#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
-#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
-#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
-#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
-#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
-#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
-#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
-#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
-#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
-#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
-#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
-#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
-#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
-#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
-#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
-#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
-#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
-#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
-#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
-#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
-#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
-#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
-#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
-#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
-#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
-#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
-#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
-#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
-#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
-#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
-#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
-#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
-#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
-#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
-#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
-#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
-#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
-#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
-#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
-#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
-#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
-#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
-#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
-#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
-#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
-#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
-#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
-#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
-#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
-#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
-#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
-#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
-#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
-#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
-#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
-#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
-#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
-#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
-#define I40E_PFPE_CQACK_PECQID_SHIFT 0
-#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
-#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
-#define I40E_PFPE_CQARM_PECQID_SHIFT 0
-#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
-#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
-#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
-#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
-#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
-#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
-#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
-#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
-#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
-#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
-#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
-#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
-#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
-#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
-#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
-#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
-#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
-#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
-#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
-#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
-#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
-#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
-#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
-#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
-#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
-#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
-#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
-#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
-#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
-#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
-#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
-#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
-#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
-#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
-#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
-#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
-#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
-#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
-#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
-#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
-#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
-#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
-#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
-#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
-#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
-#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
-#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
-#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
-#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
-#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
-#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
-#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
-#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
-#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
-#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
-#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
-#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
-#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
-#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
-#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
-#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
-#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
-#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
-#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
-#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
-#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
-#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
-#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
-#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
-#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
-#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
-#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
-#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
-#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
-#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
-#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
-#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
-#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
-#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
-#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
-#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
-#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
-#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
-#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
-#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
-#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
-#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
-#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
-#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
-#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
-#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
-#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
-#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
-#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
-#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
-#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
-#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
-#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
-#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
-#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
-#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
-#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
-#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
-#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
-#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CQACK_MAX_INDEX 127
-#define I40E_VFPE_CQACK_PECQID_SHIFT 0
-#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
-#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CQARM_MAX_INDEX 127
-#define I40E_VFPE_CQARM_PECQID_SHIFT 0
-#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
-#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CQPDB_MAX_INDEX 127
-#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
-#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
-#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
-#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
-#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
-#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
-#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
-#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
-#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
-#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
-#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
-#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
-#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
-#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
-#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
-#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
-#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
-#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
-#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
-#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
-#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
-#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
-#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
-#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
-#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
-#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
-#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
-#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
-#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
-#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
-#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
-#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
-#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
-#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
-#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
-#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
-#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
-#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
-#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
-#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
-#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
-#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
-#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
-#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
-#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
-#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
-#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
-#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
-#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
-#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
-#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
-#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
-#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
-#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
-#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
-#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
-#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
-#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
-#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
-#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
-#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
-#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
-#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
-#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
-#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
-#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
-#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
-#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
-#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
-#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
-#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
-#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
-#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
-#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
-#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
-#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
-#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
-#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
-#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
-#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
-#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
-#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
-#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
-#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
-#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
-#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
-#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
-#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
-#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
-#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
-#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
-#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
-#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
-#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
-#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
-#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
-#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
-#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
-#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
-#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
-#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
-#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
-#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
-#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
-#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
-#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
-#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
-#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
-#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
-#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
-#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
-#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
-#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
-#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
-#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
-#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
-#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
-#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
-#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
-#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
-#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
-#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
-#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
-#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
-#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
-#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
-#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
-#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
-#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
-#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
-#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
-#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
-#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
-#define I40E_GLQF_APBVT_MAX_INDEX 2047
-#define I40E_GLQF_APBVT_APBVT_SHIFT 0
-#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
-#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
-#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
-#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
-#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
-#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_FD_MSK_MAX_INDEX 1
-#define I40E_GLQF_FD_MSK_MASK_SHIFT 0
-#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT)
-#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16
-#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT)
#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_HASH_INSET_MAX_INDEX 1
-#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0
-#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT)
-#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_HASH_MSK_MAX_INDEX 1
-#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0
-#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT)
-#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16
-#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT)
#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_ORT_MAX_INDEX 63
#define I40E_GLQF_ORT_PIT_INDX_SHIFT 0
#define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT)
#define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5
#define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT)
#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7
#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT)
-#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */
-#define I40E_GLQF_PIT_MAX_INDEX 23
-#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
-#define I40E_GLQF_PIT_FSIZE_SHIFT 5
-#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT)
-#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10
-#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT)
#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
-#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
-#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
-#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
-#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
-#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
-#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
-#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
-#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
-#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
-#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
-#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
-#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
/* Redefined for X722 family */
-#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
-#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
-#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
-#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
-#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
-#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
-#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
-#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
-#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
-#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PFQF_HREGION_MAX_INDEX 7
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
-#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
-#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
-#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
-#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
-#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
-#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
-#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
-#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
-#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
-#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
-#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
-#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
-#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
-#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
-#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
-#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
-#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
-#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
-#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
-#define I40E_VSIQF_HKEY_MAX_INDEX 12
-#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
-#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
-#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
-#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
-#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
-#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
-#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
-#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
-#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
-#define I40E_VSIQF_HLUT_MAX_INDEX 15
-#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
-#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
-#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
-#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
-#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
-#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
-#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
-#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
-#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
-#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
-#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
-#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
-#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
-#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
-#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
-#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
-#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
-#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
-#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
-#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
-#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
-#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
-#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
-#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
-#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
-#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
-#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
-#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
-#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
-#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
-#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
-#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
-#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
-#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
-#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
-#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
-#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
-#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
-#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
-#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
#endif /* _I40E_REGISTER_H_ */
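The GLPES/GLQF/VFPE block removed above follows one convention throughout: each register field gets a _SHIFT plus a _MASK, where the mask is produced by I40E_MASK(mask, shift) shifting the field's bit pattern into position, and reading a field is mask-then-shift. A minimal standalone sketch of how such a pair is consumed; the macro name, sample register value, and printf harness below are illustrative only, not part of this patch:

/* Standalone sketch (not driver code): mirror of the _SHIFT/_MASK idiom. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_MASK(mask, shift) ((uint32_t)(mask) << (shift))

/* modeled on e.g. I40E_GLQF_ORT_FIELD_CNT_{SHIFT,MASK} kept above */
#define FIELD_CNT_SHIFT 5
#define FIELD_CNT_MASK  SKETCH_MASK(0x3, FIELD_CNT_SHIFT)

static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;	/* isolate the bits, then right-align */
}

int main(void)
{
	uint32_t reg = 0x0000004D;	/* pretend value read from the register */

	printf("FIELD_CNT = %u\n", get_field(reg, FIELD_CNT_MASK, FIELD_CNT_SHIFT));
	return 0;
}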
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f9555c847f73..3e5c566ceb01 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1690,7 +1690,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
case I40E_RX_PTYPE_INNER_PROT_UDP:
case I40E_RX_PTYPE_INNER_PROT_SCTP:
skb->ip_summed = CHECKSUM_UNNECESSARY;
- /* fall though */
+ fallthrough;
default:
break;
}
@@ -2210,10 +2210,10 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fall through -- handle aborts by dropping packet */
+ fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
result = I40E_XDP_CONSUMED;
break;
@@ -2580,7 +2580,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
*/
i40e_for_each_ring(ring, q_vector->tx) {
bool wd = ring->xsk_umem ?
- i40e_clean_xdp_tx_irq(vsi, ring, budget) :
+ i40e_clean_xdp_tx_irq(vsi, ring) :
i40e_clean_tx_irq(vsi, ring, budget);
if (!wd) {
@@ -2595,10 +2595,16 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
if (budget <= 0)
goto tx_only;
- /* We attempt to distribute budget to each Rx queue fairly, but don't
- * allow the budget to go below 1 because that would exit polling early.
- */
- budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
+ /* normally we have 1 Rx ring per q_vector */
+ if (unlikely(q_vector->num_ringpairs > 1))
+ /* We attempt to distribute budget to each Rx queue fairly, but
+ * don't allow the budget to go below 1 because that would exit
+ * polling early.
+ */
+ budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1);
+ else
+ /* Max of 1 Rx ring in this q_vector so give it the budget */
+ budget_per_ring = budget;
i40e_for_each_ring(ring, q_vector->rx) {
int cleaned = ring->xsk_umem ?
@@ -3538,6 +3544,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
*/
smp_wmb();
+ xdp_ring->xdp_tx_active++;
i++;
if (i == xdp_ring->count)
i = 0;
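The i40e_napi_poll() hunk above special-cases the common single-ring vector: the lone Rx ring gets the whole NAPI budget, and the fair division (clamped to at least 1 so polling is not exited early) only happens when a vector really owns several ring pairs. A standalone mirror of that arithmetic, assuming the typical NAPI budget of 64; purely illustrative, not driver code:

#include <stdio.h>

/* Mirrors the budget split introduced in i40e_napi_poll() above. */
static int budget_per_ring(int budget, int num_ringpairs)
{
	if (num_ringpairs > 1) {
		int share = budget / num_ringpairs;

		return share > 0 ? share : 1;	/* never drop below 1 */
	}
	return budget;	/* single Rx ring gets the whole budget */
}

int main(void)
{
	printf("%d\n", budget_per_ring(64, 1));	/* 64 */
	printf("%d\n", budget_per_ring(64, 3));	/* 21 */
	printf("%d\n", budget_per_ring(1, 4));	/* 1, not 0 */
	return 0;
}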
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 5c255977fd58..4036893d6825 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -18,10 +18,7 @@
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_ITR_MASK 0x1FFE /* mask for ITR register value */
#define I40E_MIN_ITR 2 /* reg uses 2 usec resolution */
-#define I40E_ITR_100K 10 /* all values below must be even */
-#define I40E_ITR_50K 20
#define I40E_ITR_20K 50
-#define I40E_ITR_18K 60
#define I40E_ITR_8K 122
#define I40E_MAX_ITR 8160 /* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
@@ -52,9 +49,6 @@ static inline u16 i40e_intrl_usec_to_reg(int intrl)
else
return 0;
}
-#define I40E_INTRL_8K 125 /* 8000 ints/sec */
-#define I40E_INTRL_62K 16 /* 62500 ints/sec */
-#define I40E_INTRL_83K 12 /* 83333 ints/sec */
#define I40E_QUEUE_END_OF_LIST 0x7FF
@@ -73,7 +67,6 @@ enum i40e_dyn_idx_t {
/* these are indexes into ITRN registers */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1
-#define I40E_PE_ITR I40E_IDX_ITR2
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
@@ -193,13 +186,6 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */
-#define I40E_RX_INCREMENT(r, i) \
- do { \
- (i)++; \
- if ((i) == (r)->count) \
- i = 0; \
- r->next_to_clean = i; \
- } while (0)
#define I40E_RX_NEXT_DESC(r, i, n) \
do { \
@@ -209,11 +195,6 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
(n) = I40E_RX_DESC((r), (i)); \
} while (0)
-#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \
- do { \
- I40E_RX_NEXT_DESC((r), (i), (n)); \
- prefetch((n)); \
- } while (0)
#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
@@ -262,15 +243,12 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
-#define I40E_MIN_DESC_PENDING 4
#define I40E_TX_FLAGS_HW_VLAN BIT(1)
#define I40E_TX_FLAGS_SW_VLAN BIT(2)
#define I40E_TX_FLAGS_TSO BIT(3)
#define I40E_TX_FLAGS_IPV4 BIT(4)
#define I40E_TX_FLAGS_IPV6 BIT(5)
-#define I40E_TX_FLAGS_FCCRC BIT(6)
-#define I40E_TX_FLAGS_FSO BIT(7)
#define I40E_TX_FLAGS_TSYN BIT(8)
#define I40E_TX_FLAGS_FD_SB BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL BIT(10)
@@ -332,9 +310,7 @@ enum i40e_ring_state_t {
/* some useful defines for virtchannel interface, which
* is the only remaining user of header split
*/
-#define I40E_RX_DTYPE_NO_SPLIT 0
#define I40E_RX_DTYPE_HEADER_SPLIT 1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
#define I40E_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
@@ -371,6 +347,7 @@ struct i40e_ring {
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
+ u16 xdp_tx_active;
u8 atr_sample_rate;
u8 atr_count;
@@ -444,7 +421,6 @@ static inline void set_ring_xdp(struct i40e_ring *ring)
#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e
#define I40E_ITR_ADAPTIVE_LATENCY 0x8000
#define I40E_ITR_ADAPTIVE_BULK 0x0000
-#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY))
struct i40e_ring_container {
struct i40e_ring *ring; /* pointer to linked list of ring(s) */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 63e098f7cb63..52410d609ba1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -84,8 +84,6 @@ enum i40e_debug_mask {
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \
- I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
@@ -178,21 +176,9 @@ struct i40e_link_status {
u8 module_type[3];
/* 1st byte: module identifier */
#define I40E_MODULE_TYPE_SFP 0x03
-#define I40E_MODULE_TYPE_QSFP 0x0D
- /* 2nd byte: ethernet compliance codes for 10/40G */
-#define I40E_MODULE_TYPE_40G_ACTIVE 0x01
-#define I40E_MODULE_TYPE_40G_LR4 0x02
-#define I40E_MODULE_TYPE_40G_SR4 0x04
-#define I40E_MODULE_TYPE_40G_CR4 0x08
-#define I40E_MODULE_TYPE_10G_BASE_SR 0x10
-#define I40E_MODULE_TYPE_10G_BASE_LR 0x20
-#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40
-#define I40E_MODULE_TYPE_10G_BASE_ER 0x80
/* 3rd byte: ethernet compliance codes for 1G */
#define I40E_MODULE_TYPE_1000BASE_SX 0x01
#define I40E_MODULE_TYPE_1000BASE_LX 0x02
-#define I40E_MODULE_TYPE_1000BASE_CX 0x04
-#define I40E_MODULE_TYPE_1000BASE_T 0x08
};
struct i40e_phy_info {
@@ -262,9 +248,6 @@ struct i40e_phy_info {
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
u32 switch_mode;
-#define I40E_NVM_IMAGE_TYPE_EVB 0x0
-#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
-#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
/* Cloud filter modes:
* Mode1: Filter on L4 port only
@@ -273,14 +256,10 @@ struct i40e_hw_capabilities {
*/
#define I40E_CLOUD_FILTER_MODE1 0x6
#define I40E_CLOUD_FILTER_MODE2 0x7
-#define I40E_CLOUD_FILTER_MODE3 0x8
#define I40E_SWITCH_MODE_MASK 0xF
u32 management_mode;
u32 mng_protocols_over_mctp;
-#define I40E_MNG_PROTOCOL_PLDM 0x2
-#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4
-#define I40E_MNG_PROTOCOL_NCSI 0x8
u32 npar_enable;
u32 os2bmc;
u32 valid_functions;
@@ -294,13 +273,8 @@ struct i40e_hw_capabilities {
bool flex10_enable;
bool flex10_capable;
u32 flex10_mode;
-#define I40E_FLEX10_MODE_UNKNOWN 0x0
-#define I40E_FLEX10_MODE_DCC 0x1
-#define I40E_FLEX10_MODE_DCI 0x2
u32 flex10_status;
-#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
-#define I40E_FLEX10_STATUS_VC_MODE 0x2
bool sec_rev_disabled;
bool update_disabled;
@@ -421,11 +395,8 @@ enum i40e_nvmupd_state {
#define I40E_NVM_AQE 0xe
#define I40E_NVM_EXEC 0xf
-#define I40E_NVM_ADAPT_SHIFT 16
-#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
#define I40E_NVMUPD_MAX_DATA 4096
-#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
struct i40e_nvm_access {
u32 command;
@@ -438,7 +409,6 @@ struct i40e_nvm_access {
/* (Q)SFP module access definitions */
#define I40E_I2C_EEPROM_DEV_ADDR 0xA0
#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2
-#define I40E_MODULE_TYPE_ADDR 0x00
#define I40E_MODULE_REVISION_ADDR 0x01
#define I40E_MODULE_SFF_8472_COMP 0x5E
#define I40E_MODULE_SFF_8472_SWAP 0x5C
@@ -547,7 +517,6 @@ struct i40e_dcbx_config {
#define I40E_DCBX_MODE_CEE 0x1
#define I40E_DCBX_MODE_IEEE 0x2
u8 app_mode;
-#define I40E_DCBX_APPS_NON_WILLING 0x1
u32 numapps;
u32 tlv_status; /* CEE mode TLV status */
struct i40e_dcb_ets_config etscfg;
@@ -895,9 +864,6 @@ enum i40e_rx_ptype_payload_layer {
#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
-#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
-#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
- I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
@@ -926,7 +892,6 @@ enum i40e_rx_desc_pe_status_bits {
I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
};
-#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
@@ -963,8 +928,6 @@ struct i40e_tx_desc {
__le64 cmd_type_offset_bsz;
};
-#define I40E_TXD_QW1_DTYPE_SHIFT 0
-#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
enum i40e_tx_desc_dtype_value {
I40E_TX_DESC_DTYPE_DATA = 0x0,
@@ -980,7 +943,6 @@ enum i40e_tx_desc_dtype_value {
};
#define I40E_TXD_QW1_CMD_SHIFT 4
-#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
enum i40e_tx_desc_cmd_bits {
I40E_TX_DESC_CMD_EOP = 0x0001,
@@ -1004,8 +966,6 @@ enum i40e_tx_desc_cmd_bits {
};
#define I40E_TXD_QW1_OFFSET_SHIFT 16
-#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
- I40E_TXD_QW1_OFFSET_SHIFT)
enum i40e_tx_desc_length_fields {
/* Note: These are predefined bit offsets */
@@ -1015,11 +975,8 @@ enum i40e_tx_desc_length_fields {
};
#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
-#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
- I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
#define I40E_TXD_QW1_L2TAG1_SHIFT 48
-#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
/* Context descriptors */
struct i40e_tx_context_desc {
@@ -1029,11 +986,8 @@ struct i40e_tx_context_desc {
__le64 type_cmd_tso_mss;
};
-#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
-#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
-#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
enum i40e_tx_ctx_desc_cmd_bits {
I40E_TX_CTX_DESC_TSO = 0x01,
@@ -1048,19 +1002,10 @@ enum i40e_tx_ctx_desc_cmd_bits {
};
#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
-#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
- I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
-#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
- I40E_TXD_CTX_QW1_MSS_SHIFT)
-#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
-#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
-#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
-#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
- I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
enum i40e_tx_ctx_desc_eipt_offload {
I40E_TX_CTX_EXT_IP_NONE = 0x0,
@@ -1070,28 +1015,16 @@ enum i40e_tx_ctx_desc_eipt_offload {
};
#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
-#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
-#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
-#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
- BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
-#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
-#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \
- I40E_TXD_CTX_QW0_NATLEN_SHIFT)
-#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
-#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
- I40E_TXD_CTX_QW0_DECTTL_SHIFT)
#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
@@ -1161,11 +1094,8 @@ enum i40e_filter_program_desc_fd_status {
I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
-#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
- I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
enum i40e_filter_program_desc_pcmd {
I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
@@ -1316,7 +1246,6 @@ struct i40e_hw_port_stats {
#define I40E_NVM_OEM_VER_OFF 0x83
#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
-#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
#define I40E_SR_NVM_EETRACK_LO 0x2D
#define I40E_SR_NVM_EETRACK_HI 0x2E
#define I40E_SR_VPD_PTR 0x2F
@@ -1329,7 +1258,6 @@ struct i40e_hw_port_stats {
#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
-#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
#define I40E_PTR_TYPE BIT(15)
#define I40E_SR_OCP_CFG_WORD0 0x2B
@@ -1463,14 +1391,11 @@ struct i40e_lldp_variables {
/* Offsets into Alternate Ram */
#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */
-#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */
-#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */
#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */
#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */
/* Alternate Ram Bandwidth Masks */
#define I40E_ALT_BW_VALUE_MASK 0xFF
-#define I40E_ALT_BW_RELATIVE_MASK 0x40000000
#define I40E_ALT_BW_VALID_MASK 0x80000000
/* RSS Hash Table Size */
@@ -1529,9 +1454,7 @@ struct i40e_package_header {
/* Generic segment header */
struct i40e_generic_seg_header {
#define SEGMENT_TYPE_METADATA 0x00000001
-#define SEGMENT_TYPE_NOTES 0x00000002
#define SEGMENT_TYPE_I40E 0x00000011
-#define SEGMENT_TYPE_X722 0x00000012
u32 type;
struct i40e_ddp_version version;
u32 size;
@@ -1541,7 +1464,6 @@ struct i40e_generic_seg_header {
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
struct i40e_ddp_version version;
-#define I40E_DDP_TRACKID_RDONLY 0
#define I40E_DDP_TRACKID_INVALID 0xFFFFFFFF
u32 track_id;
char name[I40E_DDP_NAME_SIZE];
@@ -1575,10 +1497,6 @@ struct i40e_profile_section_header {
#define SECTION_TYPE_AQ 0x00000801
#define SECTION_TYPE_RB_AQ 0x00001801
#define SECTION_TYPE_NOTE 0x80000000
-#define SECTION_TYPE_NAME 0x80000001
-#define SECTION_TYPE_PROTO 0x80000002
-#define SECTION_TYPE_PCTYPE 0x80000003
-#define SECTION_TYPE_PTYPE 0x80000004
u32 type;
u32 offset;
u32 size;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 56b9e445732b..8e133d6545bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1106,39 +1106,81 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
return -EIO;
}
-static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);
+/**
+ * i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * called to get the number of VLANs offloaded on this VF
+ **/
+static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ int num_vlans = 0, bkt;
+
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+ if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
+ num_vlans++;
+ }
+
+ return num_vlans;
+}
/**
- * i40e_config_vf_promiscuous_mode
- * @vf: pointer to the VF info
- * @vsi_id: VSI id
- * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
- * @alluni: set MAC L2 layer unicast promiscuous enable/disable
+ * i40e_get_vlan_list_sync
+ * @vsi: pointer to the VSI
+ * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
+ * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
+ * This array is allocated here, but has to be freed in caller.
*
- * Called from the VF to configure the promiscuous mode of
- * VF vsis and from the VF reset path to reset promiscuous mode.
+ * Called to get number of VLANs and VLAN list present in mac_filter_hash.
**/
-static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
- u16 vsi_id,
- bool allmulti,
- bool alluni)
+static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, int *num_vlans,
+ s16 **vlan_list)
{
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
struct i40e_mac_filter *f;
- i40e_status aq_ret = 0;
- struct i40e_vsi *vsi;
+ int i = 0;
int bkt;
- vsi = i40e_find_vsi_from_id(pf, vsi_id);
- if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
- return I40E_ERR_PARAM;
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ *num_vlans = i40e_getnum_vf_vsi_vlan_filters(vsi);
+ *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
+ if (!(*vlan_list))
+ goto err;
- if (vf->port_vlan_id) {
- aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
- allmulti,
- vf->port_vlan_id,
- NULL);
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+ if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+ continue;
+ (*vlan_list)[i++] = f->vlan;
+ }
+err:
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+}
+
+/**
+ * i40e_set_vsi_promisc
+ * @vf: pointer to the VF struct
+ * @seid: VSI number
+ * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
+ * for a given VLAN
+ * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
+ * for a given VLAN
+ * @vl: List of VLANs - apply filter for given VLANs
+ * @num_vlans: Number of elements in @vl
+ **/
+static i40e_status
+i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ bool unicast_enable, s16 *vl, int num_vlans)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status aq_ret;
+ int i;
+
+ /* No VLAN to set promisc on, set on VSI */
+ if (!num_vlans || !vl) {
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
+ multi_enable,
+ NULL);
if (aq_ret) {
int aq_err = pf->hw.aq.asq_last_status;
@@ -1147,13 +1189,14 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
vf->vf_id,
i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw, aq_err));
+
return aq_ret;
}
- aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
- alluni,
- vf->port_vlan_id,
- NULL);
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
+ unicast_enable,
+ NULL, true);
+
if (aq_ret) {
int aq_err = pf->hw.aq.asq_last_status;
@@ -1163,68 +1206,84 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw, aq_err));
}
+
return aq_ret;
- } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
- if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
- continue;
- aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
- vsi->seid,
- allmulti,
- f->vlan,
- NULL);
- if (aq_ret) {
- int aq_err = pf->hw.aq.asq_last_status;
+ }
- dev_err(&pf->pdev->dev,
- "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
- f->vlan,
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- }
+ for (i = 0; i < num_vlans; i++) {
+ aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
+ multi_enable,
+ vl[i], NULL);
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ vf->vf_id,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ }
- aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
- vsi->seid,
- alluni,
- f->vlan,
- NULL);
- if (aq_ret) {
- int aq_err = pf->hw.aq.asq_last_status;
+ aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
+ unicast_enable,
+ vl[i], NULL);
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
- dev_err(&pf->pdev->dev,
- "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
- f->vlan,
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- }
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+ vf->vf_id,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
}
- return aq_ret;
}
- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti,
- NULL);
- if (aq_ret) {
- int aq_err = pf->hw.aq.asq_last_status;
+ return aq_ret;
+}
- dev_err(&pf->pdev->dev,
- "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
- vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+/**
+ * i40e_config_vf_promiscuous_mode
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI id
+ * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
+ * @alluni: set MAC L2 layer unicast promiscuous enable/disable
+ *
+ * Called from the VF to configure the promiscuous mode of
+ * VF vsis and from the VF reset path to reset promiscuous mode.
+ **/
+static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+ u16 vsi_id,
+ bool allmulti,
+ bool alluni)
+{
+ i40e_status aq_ret = I40E_SUCCESS;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi;
+ int num_vlans;
+ s16 *vl;
+
+ vsi = i40e_find_vsi_from_id(pf, vsi_id);
+ if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
+ return I40E_ERR_PARAM;
+
+ if (vf->port_vlan_id) {
+ aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
+ alluni, &vf->port_vlan_id, 1);
return aq_ret;
- }
+ } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+ i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
- aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni,
- NULL, true);
- if (aq_ret) {
- int aq_err = pf->hw.aq.asq_last_status;
+ if (!vl)
+ return I40E_ERR_NO_MEMORY;
- dev_err(&pf->pdev->dev,
- "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
- vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
+ vl, num_vlans);
+ kfree(vl);
+ return aq_ret;
}
+ /* no VLANs to set on, set on VSI */
+ aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
+ NULL, 0);
return aq_ret;
}
@@ -1973,25 +2032,6 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
}
/**
- * i40e_getnum_vf_vsi_vlan_filters
- * @vsi: pointer to the vsi
- *
- * called to get the number of VLANs offloaded on this VF
- **/
-static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
-{
- struct i40e_mac_filter *f;
- int num_vlans = 0, bkt;
-
- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
- if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
- num_vlans++;
- }
-
- return num_vlans;
-}
-
-/**
* i40e_vc_config_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
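
The promiscuous-mode rework above separates hash traversal from firmware calls: i40e_getnum_vf_vsi_vlan_filters() and i40e_get_vlan_list_sync() snapshot the VLAN IDs into a kcalloc()'d array while mac_filter_hash_lock is held, and i40e_set_vsi_promisc() then issues the per-VLAN (or per-VSI) AdminQ commands with the lock released; the caller frees the array. A rough standalone sketch of that snapshot-under-lock pattern, using pthreads and illustrative names:

/* Sketch of the snapshot-under-lock pattern used above: copy the VLAN IDs
 * out while the lock is held, then do the slow per-VLAN work on the copy.
 * Illustrative only; not driver code. Build with -pthread. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t filter_lock = PTHREAD_MUTEX_INITIALIZER;
static short vlans[] = { 10, 20, 30 };
static int num_vlans = 3;

/* Snapshot the VLAN list; the caller must free *out. */
static int get_vlan_list_sync(short **out, int *count)
{
        pthread_mutex_lock(&filter_lock);
        *count = num_vlans;
        *out = calloc(*count, sizeof(**out));
        if (*out)
                memcpy(*out, vlans, *count * sizeof(**out));
        pthread_mutex_unlock(&filter_lock);
        return *out ? 0 : -1;
}

/* Stand-in for the per-VLAN firmware commands issued without the lock. */
static void set_promisc_on_vlans(const short *vl, int count)
{
        for (int i = 0; i < count; i++)
                printf("set promiscuous mode on VLAN %d\n", vl[i]);
}

int main(void)
{
        short *vl;
        int count;

        if (get_vlan_list_sync(&vl, &count))
                return 1;
        set_promisc_on_vlans(vl, count);
        free(vl);
        return 0;
}
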
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 631248c0981a..5491215d81de 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -10,7 +10,6 @@
#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
-#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3
#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
#define I40E_VLAN_PRIORITY_SHIFT 13
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 7276580cbe64..8ce57b507a21 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -168,10 +168,10 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fallthrough -- handle aborts by dropping packet */
+ fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
result = I40E_XDP_CONSUMED;
break;
@@ -378,19 +378,13 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
**/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
+ unsigned int sent_frames = 0, total_bytes = 0;
struct i40e_tx_desc *tx_desc = NULL;
struct i40e_tx_buffer *tx_bi;
- bool work_done = true;
struct xdp_desc desc;
dma_addr_t dma;
while (budget-- > 0) {
- if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
- xdp_ring->tx_stats.tx_busy++;
- work_done = false;
- break;
- }
-
if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
break;
@@ -408,6 +402,9 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
| I40E_TX_DESC_CMD_EOP,
0, desc.len, 0);
+ sent_frames++;
+ total_bytes += tx_bi->bytecount;
+
xdp_ring->next_to_use++;
if (xdp_ring->next_to_use == xdp_ring->count)
xdp_ring->next_to_use = 0;
@@ -420,9 +417,10 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
i40e_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes);
}
- return !!budget && work_done;
+ return !!budget;
}
/**
@@ -434,6 +432,7 @@ static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
struct i40e_tx_buffer *tx_bi)
{
xdp_return_frame(tx_bi->xdpf);
+ tx_ring->xdp_tx_active--;
dma_unmap_single(tx_ring->dev,
dma_unmap_addr(tx_bi, dma),
dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
@@ -442,32 +441,29 @@ static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
/**
* i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
+ * @vsi: Current VSI
* @tx_ring: XDP Tx ring
- * @tx_bi: Tx buffer info to clean
*
 * Returns true if cleanup/transmission is done.
**/
-bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
- struct i40e_ring *tx_ring, int napi_budget)
+bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
- unsigned int ntc, total_bytes = 0, budget = vsi->work_limit;
- u32 i, completed_frames, frames_ready, xsk_frames = 0;
struct xdp_umem *umem = tx_ring->xsk_umem;
+ u32 i, completed_frames, xsk_frames = 0;
u32 head_idx = i40e_get_head(tx_ring);
- bool work_done = true, xmit_done;
struct i40e_tx_buffer *tx_bi;
+ unsigned int ntc;
if (head_idx < tx_ring->next_to_clean)
head_idx += tx_ring->count;
- frames_ready = head_idx - tx_ring->next_to_clean;
+ completed_frames = head_idx - tx_ring->next_to_clean;
- if (frames_ready == 0) {
+ if (completed_frames == 0)
goto out_xmit;
- } else if (frames_ready > budget) {
- completed_frames = budget;
- work_done = false;
- } else {
- completed_frames = frames_ready;
+
+ if (likely(!tx_ring->xdp_tx_active)) {
+ xsk_frames = completed_frames;
+ goto skip;
}
ntc = tx_ring->next_to_clean;
@@ -475,18 +471,18 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
for (i = 0; i < completed_frames; i++) {
tx_bi = &tx_ring->tx_bi[ntc];
- if (tx_bi->xdpf)
+ if (tx_bi->xdpf) {
i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
- else
+ tx_bi->xdpf = NULL;
+ } else {
xsk_frames++;
-
- tx_bi->xdpf = NULL;
- total_bytes += tx_bi->bytecount;
+ }
if (++ntc >= tx_ring->count)
ntc = 0;
}
+skip:
tx_ring->next_to_clean += completed_frames;
if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
tx_ring->next_to_clean -= tx_ring->count;
@@ -494,16 +490,13 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
if (xsk_frames)
xsk_umem_complete_tx(umem, xsk_frames);
- i40e_arm_wb(tx_ring, vsi, budget);
- i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);
+ i40e_arm_wb(tx_ring, vsi, completed_frames);
out_xmit:
if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
- xmit_done = i40e_xmit_zc(tx_ring, budget);
-
- return work_done && xmit_done;
+ return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}
/**
@@ -567,7 +560,7 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
/**
* i40e_xsk_clean_xdp_ring - Clean the XDP Tx ring on shutdown
- * @xdp_ring: XDP Tx ring
+ * @tx_ring: XDP Tx ring
**/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
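
The i40e_clean_xdp_tx_irq() changes above rely on the new xdp_tx_active counter: it is incremented in i40e_xmit_xdp_ring() and decremented in i40e_clean_xdp_tx_buffer(), so when it reads zero every completed descriptor belongs to the AF_XDP umem and the per-descriptor tx_bi walk can be skipped. A rough standalone sketch of that fast path (ring layout and names are simplified, not driver code):

/* Sketch of the fast path enabled by the new xdp_tx_active counter: when no
 * XDP_REDIRECT frames are outstanding, every completed descriptor belongs to
 * the AF_XDP umem and the per-descriptor walk is skipped. */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 64

struct fake_ring {
        unsigned int xdp_tx_active;       /* frames queued via ndo_xdp_xmit */
        bool desc_is_xdp[RING_SIZE];      /* which slots hold XDP frames */
};

/* Returns the number of frames to complete back to the umem. */
static unsigned int clean_completed(struct fake_ring *r, unsigned int ntc,
                                    unsigned int completed)
{
        unsigned int xsk_frames = 0;

        if (r->xdp_tx_active == 0)
                return completed;         /* fast path: all AF_XDP frames */

        for (unsigned int i = 0; i < completed; i++) {
                if (r->desc_is_xdp[ntc])
                        r->xdp_tx_active--;   /* free the XDP frame */
                else
                        xsk_frames++;         /* complete to the umem */
                ntc = (ntc + 1) % RING_SIZE;
        }
        return xsk_frames;
}

int main(void)
{
        struct fake_ring ring = { .xdp_tx_active = 0 };

        printf("%u frames to the umem\n", clean_completed(&ring, 0, 16));
        return 0;
}
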
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index ea919a7d60ec..c524c142127f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -15,8 +15,7 @@ int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
-bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
- struct i40e_ring *tx_ring, int napi_budget);
+bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring);
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring);
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 10b805ba03ee..8a65525a7c0d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -375,7 +375,6 @@ struct iavf_device {
/* needed by iavf_ethtool.c */
extern char iavf_driver_name[];
-extern const char iavf_driver_version[];
extern struct workqueue_struct *iavf_wq;
int iavf_up(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 181573822942..c93567f4d0f7 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -571,7 +571,6 @@ static void iavf_get_drvinfo(struct net_device *netdev,
struct iavf_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, iavf_driver_name, 32);
- strlcpy(drvinfo->version, iavf_driver_version, 32);
strlcpy(drvinfo->fw_version, "N/A", 4);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index fa82768e5eda..d870343cf689 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -21,16 +21,6 @@ char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
"Intel(R) Ethernet Adaptive Virtual Function Network Driver";
-#define DRV_KERN "-k"
-
-#define DRV_VERSION_MAJOR 3
-#define DRV_VERSION_MINOR 2
-#define DRV_VERSION_BUILD 3
-#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
- __stringify(DRV_VERSION_MINOR) "." \
- __stringify(DRV_VERSION_BUILD) \
- DRV_KERN
-const char iavf_driver_version[] = DRV_VERSION;
static const char iavf_copyright[] =
"Copyright (c) 2013 - 2018 Intel Corporation.";
@@ -57,7 +47,6 @@ MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;
@@ -1863,8 +1852,10 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
- if (!adapter->rss_key || !adapter->rss_lut)
+ if (!adapter->rss_key || !adapter->rss_lut) {
+ err = -ENOMEM;
goto err_mem;
+ }
if (RSS_AQ(adapter))
adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
else
@@ -1946,7 +1937,10 @@ static void iavf_watchdog_task(struct work_struct *work)
iavf_send_api_ver(adapter);
}
} else {
- if (!iavf_process_aq_command(adapter) &&
+ /* An error will be returned if no commands were
+ * processed; use this opportunity to update stats
+ */
+ if (iavf_process_aq_command(adapter) &&
adapter->state == __IAVF_RUNNING)
iavf_request_stats(adapter);
}
@@ -3772,7 +3766,6 @@ err_dma:
return err;
}
-#ifdef CONFIG_PM
/**
* iavf_suspend - Power management suspend routine
* @pdev: PCI device information struct
@@ -3780,11 +3773,10 @@ err_dma:
*
* Called when the system (VM) is entering sleep/suspend.
**/
-static int iavf_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused iavf_suspend(struct device *dev_d)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev_d);
struct iavf_adapter *adapter = netdev_priv(netdev);
- int retval = 0;
netif_device_detach(netdev);
@@ -3802,12 +3794,6 @@ static int iavf_suspend(struct pci_dev *pdev, pm_message_t state)
clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
- pci_disable_device(pdev);
-
return 0;
}
@@ -3817,24 +3803,13 @@ static int iavf_suspend(struct pci_dev *pdev, pm_message_t state)
*
* Called when the system (VM) is resumed from sleep/suspend.
**/
-static int iavf_resume(struct pci_dev *pdev)
+static int __maybe_unused iavf_resume(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct iavf_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
u32 err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- /* pci_restore_state clears dev->state_saved so call
- * pci_save_state to restore it.
- */
- pci_save_state(pdev);
-
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
- return err;
- }
pci_set_master(pdev);
rtnl_lock();
@@ -3858,7 +3833,6 @@ static int iavf_resume(struct pci_dev *pdev)
return err;
}
-#endif /* CONFIG_PM */
/**
* iavf_remove - Device Removal Routine
* @pdev: PCI device information struct
@@ -3960,16 +3934,15 @@ static void iavf_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
+
static struct pci_driver iavf_driver = {
- .name = iavf_driver_name,
- .id_table = iavf_pci_tbl,
- .probe = iavf_probe,
- .remove = iavf_remove,
-#ifdef CONFIG_PM
- .suspend = iavf_suspend,
- .resume = iavf_resume,
-#endif
- .shutdown = iavf_shutdown,
+ .name = iavf_driver_name,
+ .id_table = iavf_pci_tbl,
+ .probe = iavf_probe,
+ .remove = iavf_remove,
+ .driver.pm = &iavf_pm_ops,
+ .shutdown = iavf_shutdown,
};
/**
@@ -3982,8 +3955,7 @@ static int __init iavf_init_module(void)
{
int ret;
- pr_info("iavf: %s - version %s\n", iavf_driver_string,
- iavf_driver_version);
+ pr_info("iavf: %s\n", iavf_driver_string);
pr_info("%s\n", iavf_copyright);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index e091bab7e770..ca041b39ffda 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1007,7 +1007,7 @@ static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
case IAVF_RX_PTYPE_INNER_PROT_UDP:
case IAVF_RX_PTYPE_INNER_PROT_SCTP:
skb->ip_summed = CHECKSUM_UNNECESSARY;
- /* fall though */
+ fallthrough;
default:
break;
}
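
This hunk, like the i40e_txrx.c and i40e_xsk.c hunks earlier, replaces /* fall through */ comments with the fallthrough pseudo-keyword so the intent is checkable by the compiler (-Wimplicit-fallthrough) rather than conveyed only to readers. A standalone sketch of the idiom, with a fallback definition since linux/compiler_attributes.h is not available outside the kernel:

/* Sketch of the fallthrough annotation used above; outside the kernel,
 * linux/compiler_attributes.h is unavailable, so provide a fallback. */
#include <stdio.h>

#ifndef fallthrough
#if defined(__GNUC__) && __GNUC__ >= 7
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0) /* fallthrough */
#endif
#endif

/* Mirrors the Rx checksum switch: mark the checksum as verified for
 * TCP/UDP/SCTP and deliberately fall into the default arm. */
static int checksum_unnecessary(int inner_prot)
{
        int valid = 0;

        switch (inner_prot) {
        case 6:   /* TCP */
        case 17:  /* UDP */
        case 132: /* SCTP */
                valid = 1;
                fallthrough;
        default:
                break;
        }
        return valid;
}

int main(void)
{
        printf("%d\n", checksum_unnecessary(6));  /* 1 */
        printf("%d\n", checksum_unnecessary(2));  /* 0 */
        return 0;
}
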
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index 7190a40c540c..de9fda78b43a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -192,14 +192,6 @@ struct iavf_hw {
char err_str[16];
};
-struct iavf_driver_version {
- u8 major_version;
- u8 minor_version;
- u8 build_version;
- u8 subbuild_version;
- u8 driver_string[32];
-};
-
/* RX Descriptors */
union iavf_16byte_rx_desc {
struct {
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 980bbcc64b4b..6da4f43f2348 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -23,6 +23,7 @@ ice-y := ice_main.o \
ice_flex_pipe.o \
ice_flow.o \
ice_devlink.o \
+ ice_fw_update.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 5792ee616b5c..fe140ff38f74 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
+#include <linux/wait.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include <linux/ethtool.h>
@@ -55,7 +56,6 @@
#include "ice_xsk.h"
#include "ice_arfs.h"
-extern const char ice_drv_ver[];
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC 64
@@ -223,6 +223,8 @@ enum ice_state {
__ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
__ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */
__ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */
+ __ICE_LINK_DEFAULT_OVERRIDE_PENDING,
+ __ICE_PHY_INIT_COMPLETE,
__ICE_STATE_NBITS /* must be last */
};
@@ -254,6 +256,7 @@ struct ice_vsi {
u32 tx_busy;
u32 rx_buf_failed;
u32 rx_page_failed;
+ u32 rx_gro_dropped;
u16 num_q_vectors;
u16 base_vector; /* IRQ base for OS reserved vectors */
enum ice_vsi_type type;
@@ -358,12 +361,14 @@ enum ice_pf_flags {
ICE_FLAG_FD_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
+ ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
ICE_FLAG_NO_MEDIA,
ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_FLAG_LEGACY_RX,
ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
+ ICE_FLAG_LINK_LENIENT_MODE_ENA,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -374,6 +379,7 @@ struct ice_pf {
struct devlink_port devlink_port;
struct devlink_region *nvm_region;
+ struct devlink_region *devcaps_region;
/* OS reserved IRQ details */
struct msix_entry *msix_entries;
@@ -408,6 +414,12 @@ struct ice_pf {
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
+
+ /* spinlock to protect the AdminQ wait list */
+ spinlock_t aq_wait_lock;
+ struct hlist_head aq_wait_list;
+ wait_queue_head_t aq_wait_queue;
+
u32 hw_csum_rx_error;
u16 oicr_idx; /* Other interrupt cause MSIX vector index */
u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
@@ -423,6 +435,8 @@ struct ice_pf {
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
+ u8 wol_ena : 1; /* software state of WoL */
+ u32 wakeup_reason; /* last wakeup reason */
struct ice_hw_port_stats stats;
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
@@ -435,6 +449,10 @@ struct ice_pf {
u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
u32 sw_int_count;
+
+ __le64 nvm_phy_type_lo; /* NVM PHY type low */
+ __le64 nvm_phy_type_hi; /* NVM PHY type high */
+ struct ice_link_default_override_tlv link_dflt_override;
};
struct ice_netdev_priv {
@@ -568,6 +586,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
const char *ice_stat_str(enum ice_status stat_err);
const char *ice_aq_str(enum ice_aq_err aq_err);
+bool ice_is_wol_supported(struct ice_pf *pf);
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
bool is_tun);
@@ -582,6 +601,8 @@ void ice_fdir_release_flows(struct ice_hw *hw);
void ice_fdir_replay_flows(struct ice_hw *hw);
void ice_fdir_replay_fltrs(struct ice_pf *pf);
int ice_fdir_create_dflt_rules(struct ice_pf *pf);
+int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
+ struct ice_rq_event_info *event);
int ice_open(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
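
Among the ice.h additions above are aq_wait_lock, aq_wait_list, aq_wait_queue and the ice_aq_wait_for_event() prototype, which let a caller sleep until the control-queue service task observes a firmware completion with a given opcode (used by the new ice_fw_update.c). A rough userspace analogue of that wait-for-event plumbing, with illustrative names and no timeout handling:

/* Rough userspace analogue of the AdminQ wait-for-event plumbing declared
 * above: callers sleep until a service task records a completion with the
 * expected opcode. Names are illustrative. Build with -pthread. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t aq_wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t aq_wait_queue = PTHREAD_COND_INITIALIZER;
static int last_opcode = -1;

/* Service-task side: record the completed opcode and wake any waiters. */
static void aq_complete(int opcode)
{
        pthread_mutex_lock(&aq_wait_lock);
        last_opcode = opcode;
        pthread_cond_broadcast(&aq_wait_queue);
        pthread_mutex_unlock(&aq_wait_lock);
}

/* Caller side: block until the expected opcode has completed. */
static void aq_wait_for_event(int opcode)
{
        pthread_mutex_lock(&aq_wait_lock);
        while (last_opcode != opcode)
                pthread_cond_wait(&aq_wait_queue, &aq_wait_lock);
        pthread_mutex_unlock(&aq_wait_lock);
}

static void *firmware_thread(void *arg)
{
        (void)arg;
        aq_complete(0x0707);    /* e.g. an NVM write-activate completion */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, firmware_thread, NULL);
        aq_wait_for_event(0x0707);
        puts("firmware event received");
        pthread_join(t, NULL);
        return 0;
}
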
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 92f82f2a8af4..ba9375218fef 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -109,6 +109,13 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_MSIX 0x0043
#define ICE_AQC_CAPS_FD 0x0045
#define ICE_AQC_CAPS_MAX_MTU 0x0047
+#define ICE_AQC_CAPS_NVM_VER 0x0048
+#define ICE_AQC_CAPS_PENDING_NVM_VER 0x0049
+#define ICE_AQC_CAPS_OROM_VER 0x004A
+#define ICE_AQC_CAPS_PENDING_OROM_VER 0x004B
+#define ICE_AQC_CAPS_NET_VER 0x004C
+#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
+#define ICE_AQC_CAPS_NVM_MGMT 0x0080
u8 major_ver;
u8 minor_ver;
@@ -215,13 +222,6 @@ struct ice_aqc_get_sw_cfg_resp_elem {
#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15)
};
-/* The response buffer is as follows. Note that the length of the
- * elements array varies with the length of the command response.
- */
-struct ice_aqc_get_sw_cfg_resp {
- struct ice_aqc_get_sw_cfg_resp_elem elements[1];
-};
-
/* These resource type defines are used for all switch resource
* commands where a resource type is required, such as:
* Get Resource Allocation command (indirect 0x0204)
@@ -274,7 +274,7 @@ struct ice_aqc_alloc_free_res_elem {
#define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_M \
(0xF << ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S)
__le16 num_elems;
- struct ice_aqc_res_elem elem[1];
+ struct ice_aqc_res_elem elem[];
};
/* Add VSI (indirect 0x0210)
@@ -568,8 +568,8 @@ struct ice_sw_rule_lkup_rx_tx {
* lookup-type
*/
__le16 hdr_len;
- u8 hdr[1];
-} __packed;
+ u8 hdr[];
+};
/* Add/Update/Remove large action command/response entry
* "index" is returned as part of a response to a successful Add command, and
@@ -578,7 +578,6 @@ struct ice_sw_rule_lkup_rx_tx {
struct ice_sw_rule_lg_act {
__le16 index; /* Index in large action table */
__le16 size;
- __le32 act[1]; /* array of size for actions */
/* Max number of large actions */
#define ICE_MAX_LG_ACT 4
/* Bit 0:1 - Action type */
@@ -629,6 +628,7 @@ struct ice_sw_rule_lg_act {
#define ICE_LG_ACT_STAT_COUNT 0x7
#define ICE_LG_ACT_STAT_COUNT_S 3
#define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S)
+ __le32 act[]; /* array of size for actions */
};
/* Add/Update/Remove VSI list command/response entry
@@ -638,7 +638,7 @@ struct ice_sw_rule_lg_act {
struct ice_sw_rule_vsi_list {
__le16 index; /* Index of VSI/Prune list */
__le16 number_vsi;
- __le16 vsi[1]; /* Array of number_vsi VSI numbers */
+ __le16 vsi[]; /* Array of number_vsi VSI numbers */
};
/* Query VSI list command/response entry */
@@ -695,14 +695,6 @@ struct ice_aqc_sched_elem_cmd {
__le32 addr_low;
};
-/* This is the buffer for:
- * Suspend Nodes (indirect 0x0409)
- * Resume Nodes (indirect 0x040A)
- */
-struct ice_aqc_suspend_resume_elem {
- __le32 teid[1];
-};
-
struct ice_aqc_elem_info_bw {
__le16 bw_profile_idx;
__le16 bw_alloc;
@@ -753,15 +745,7 @@ struct ice_aqc_txsched_topo_grp_info_hdr {
struct ice_aqc_add_elem {
struct ice_aqc_txsched_topo_grp_info_hdr hdr;
- struct ice_aqc_txsched_elem_data generic[1];
-};
-
-struct ice_aqc_conf_elem {
- struct ice_aqc_txsched_elem_data generic[1];
-};
-
-struct ice_aqc_get_elem {
- struct ice_aqc_txsched_elem_data generic[1];
+ struct ice_aqc_txsched_elem_data generic[];
};
struct ice_aqc_get_topo_elem {
@@ -772,7 +756,7 @@ struct ice_aqc_get_topo_elem {
struct ice_aqc_delete_elem {
struct ice_aqc_txsched_topo_grp_info_hdr hdr;
- __le32 teid[1];
+ __le32 teid[];
};
/* Query Port ETS (indirect 0x040E)
@@ -835,10 +819,6 @@ struct ice_aqc_rl_profile_elem {
__le16 rl_encode;
};
-struct ice_aqc_rl_profile_generic_elem {
- struct ice_aqc_rl_profile_elem generic[1];
-};
-
/* Query Scheduler Resource Allocation (indirect 0x0412)
* This indirect command retrieves the scheduler resources allocated by
* EMP Firmware to the given PF.
@@ -988,8 +968,11 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_GET_PHY_EN_MOD_QUAL BIT(5)
#define ICE_AQC_PHY_EN_AUTO_FEC BIT(7)
#define ICE_AQC_PHY_CAPS_MASK ICE_M(0xff, 0)
- u8 low_power_ctrl;
+ u8 low_power_ctrl_an;
#define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
+#define ICE_AQC_PHY_AN_EN_CLAUSE28 BIT(1)
+#define ICE_AQC_PHY_AN_EN_CLAUSE73 BIT(2)
+#define ICE_AQC_PHY_AN_EN_CLAUSE37 BIT(3)
__le16 eee_cap;
#define ICE_AQC_PHY_EEE_EN_100BASE_TX BIT(0)
#define ICE_AQC_PHY_EEE_EN_1000BASE_T BIT(1)
@@ -1010,12 +993,14 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
#define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
#define ICE_AQC_PHY_FEC_MASK ICE_M(0xdf, 0)
- u8 rsvd1; /* Byte 35 reserved */
+ u8 module_compliance_enforcement;
+#define ICE_AQC_MOD_ENFORCE_STRICT_MODE BIT(0)
u8 extended_compliance_code;
#define ICE_MODULE_TYPE_TOTAL_BYTE 3
u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
#define ICE_AQC_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
#define ICE_AQC_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define ICE_AQC_MOD_TYPE_IDENT 1
#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
@@ -1059,11 +1044,11 @@ struct ice_aqc_set_phy_cfg_data {
#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5)
#define ICE_AQ_PHY_ENA_LESM BIT(6)
#define ICE_AQ_PHY_ENA_AUTO_FEC BIT(7)
- u8 low_power_ctrl;
+ u8 low_power_ctrl_an;
__le16 eee_cap; /* Value from ice_aqc_get_phy_caps */
__le16 eeer_value;
u8 link_fec_opt; /* Use defines from ice_aqc_get_phy_caps */
- u8 rsvd1;
+ u8 module_compliance_enforcement;
};
/* Set MAC Config command data structure (direct 0x0603) */
@@ -1174,6 +1159,7 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_PWR_QSFP_CLASS_3 2
#define ICE_AQ_LINK_PWR_QSFP_CLASS_4 3
__le16 link_speed;
+#define ICE_AQ_LINK_SPEED_M 0x7FF
#define ICE_AQ_LINK_SPEED_10MB BIT(0)
#define ICE_AQ_LINK_SPEED_100MB BIT(1)
#define ICE_AQ_LINK_SPEED_1000MB BIT(2)
@@ -1216,6 +1202,57 @@ struct ice_aqc_set_mac_lb {
u8 reserved[15];
};
+struct ice_aqc_link_topo_addr {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQC_LINK_TOPO_PORT_NUM_VALID BIT(0)
+ u8 node_type_ctx;
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_S 0
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_M (0xF << ICE_AQC_LINK_TOPO_NODE_TYPE_S)
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_PHY 0
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_MUX_CTRL 2
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_LED_CTRL 3
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_LED 4
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_THERMAL 5
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE 6
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ 7
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define ICE_AQC_LINK_TOPO_NODE_CTX_S 4
+#define ICE_AQC_LINK_TOPO_NODE_CTX_M \
+ (0xF << ICE_AQC_LINK_TOPO_NODE_CTX_S)
+#define ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL 0
+#define ICE_AQC_LINK_TOPO_NODE_CTX_BOARD 1
+#define ICE_AQC_LINK_TOPO_NODE_CTX_PORT 2
+#define ICE_AQC_LINK_TOPO_NODE_CTX_NODE 3
+#define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED 4
+#define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE 5
+ u8 index;
+ __le16 handle;
+#define ICE_AQC_LINK_TOPO_HANDLE_S 0
+#define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S)
+/* Used to decode the handle field */
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+#define ICE_AQC_LINK_TOPO_HANDLE_NODE_S 0
+/* In case of a Mezzanine type */
+#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M \
+ (0x3F << ICE_AQC_LINK_TOPO_HANDLE_NODE_S)
+#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S 6
+#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_M (0x7 << ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S)
+/* In case of a LOM type */
+#define ICE_AQC_LINK_TOPO_HANDLE_LOM_NODE_M \
+ (0x1FF << ICE_AQC_LINK_TOPO_HANDLE_NODE_S)
+};
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ice_aqc_get_link_topo {
+ struct ice_aqc_link_topo_addr addr;
+ u8 node_part_num;
+ u8 rsvd[9];
+};
+
/* Set Port Identification LED (direct, 0x06E9) */
struct ice_aqc_set_port_id_led {
u8 lport_num;
@@ -1268,7 +1305,14 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_PRESERVE_ALL BIT(1)
+#define ICE_AQC_NVM_FACTORY_DEFAULT (2 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */
+#define ICE_AQC_NVM_ACTIV_SEL_OROM BIT(4)
+#define ICE_AQC_NVM_ACTIV_SEL_NETLIST BIT(5)
+#define ICE_AQC_NVM_SPECIAL_UPDATE BIT(6)
+#define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
+#define ICE_AQC_NVM_ACTIV_SEL_MASK ICE_M(0x7, 3)
#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
__le16 module_typeid;
__le16 length;
@@ -1317,6 +1361,67 @@ struct ice_aqc_nvm_checksum {
#define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH 0xA
#define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER 0x2F
+/* Used for NVM Set Package Data command - 0x070A */
+struct ice_aqc_nvm_pkg_data {
+ u8 reserved[3];
+ u8 cmd_flags;
+#define ICE_AQC_NVM_PKG_DELETE BIT(0) /* used for command call */
+#define ICE_AQC_NVM_PKG_SKIPPED BIT(0) /* used for command response */
+
+ u32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Used for Pass Component Table command - 0x070B */
+struct ice_aqc_nvm_pass_comp_tbl {
+ u8 component_response; /* Response only */
+#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED 0x0
+#define ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1
+#define ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2
+ u8 component_response_code; /* Response only */
+#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0
+#define ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1
+#define ICE_AQ_NVM_PASS_COMP_STAMP_LOWER 0x2
+#define ICE_AQ_NVM_PASS_COMP_INVALID_STAMP_CODE 0x3
+#define ICE_AQ_NVM_PASS_COMP_CONFLICT_CODE 0x4
+#define ICE_AQ_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE 0x5
+#define ICE_AQ_NVM_PASS_COMP_NOT_SUPPORTED_CODE 0x6
+#define ICE_AQ_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE 0x7
+#define ICE_AQ_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE 0x8
+#define ICE_AQ_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE 0xA
+#define ICE_AQ_NVM_PASS_COMP_VER_STR_LOWER_CODE 0xB
+ u8 reserved;
+ u8 transfer_flag;
+#define ICE_AQ_NVM_PASS_COMP_TBL_START 0x1
+#define ICE_AQ_NVM_PASS_COMP_TBL_MIDDLE 0x2
+#define ICE_AQ_NVM_PASS_COMP_TBL_END 0x4
+#define ICE_AQ_NVM_PASS_COMP_TBL_START_AND_END 0x5
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_nvm_comp_tbl {
+ __le16 comp_class;
+#define NVM_COMP_CLASS_ALL_FW 0x000A
+
+ __le16 comp_id;
+#define NVM_COMP_ID_OROM 0x5
+#define NVM_COMP_ID_NVM 0x6
+#define NVM_COMP_ID_NETLIST 0x8
+
+ u8 comp_class_idx;
+#define FWU_COMP_CLASS_IDX_NOT_USE 0x0
+
+ __le32 comp_cmp_stamp;
+ u8 cvs_type;
+#define NVM_CVS_TYPE_ASCII 0x1
+
+ u8 cvs_len;
+ u8 cvs[]; /* Component Version String */
+} __packed;
+
/**
* Send to PF command (indirect 0x0801) ID is only used by PF
*
@@ -1476,7 +1581,7 @@ struct ice_aqc_get_set_rss_keys {
struct ice_aqc_get_set_rss_lut {
#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
-#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
+#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
__le16 vsi_id;
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \
@@ -1537,7 +1642,7 @@ struct ice_aqc_add_tx_qgrp {
__le32 parent_teid;
u8 num_txqs;
u8 rsvd[3];
- struct ice_aqc_add_txqs_perq txqs[1];
+ struct ice_aqc_add_txqs_perq txqs[];
};
/* Disable Tx LAN Queues (indirect 0x0C31) */
@@ -1575,18 +1680,13 @@ struct ice_aqc_dis_txq_item {
u8 num_qs;
u8 rsvd;
/* The length of the q_id array varies according to num_qs */
- __le16 q_id[1];
- /* This only applies from F8 onward */
#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S 15
#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_LAN_Q \
(0 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET \
(1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
-};
-
-struct ice_aqc_dis_txq {
- struct ice_aqc_dis_txq_item qgrps[1];
-};
+ __le16 q_id[];
+} __packed;
/* Configure Firmware Logging Command (indirect 0xFF09)
* Logging Information Read Response (indirect 0xFF10)
@@ -1636,12 +1736,7 @@ enum ice_aqc_fw_logging_mod {
ICE_AQC_FW_LOG_ID_MAX,
};
-/* This is the buffer for both of the logging commands.
- * The entry array size depends on the datalen parameter in the descriptor.
- * There will be a total of datalen / 2 entries.
- */
-struct ice_aqc_fw_logging_data {
- __le16 entry[1];
+/* Defines for both above FW logging command/response buffers */
#define ICE_AQC_FW_LOG_ID_S 0
#define ICE_AQC_FW_LOG_ID_M (0xFFF << ICE_AQC_FW_LOG_ID_S)
@@ -1654,7 +1749,6 @@ struct ice_aqc_fw_logging_data {
#define ICE_AQC_FW_LOG_INIT_EN BIT(13) /* Used by command */
#define ICE_AQC_FW_LOG_FLOW_EN BIT(14) /* Used by command */
#define ICE_AQC_FW_LOG_ERR_EN BIT(15) /* Used by command */
-};
/* Get/Clear FW Log (indirect 0xFF11) */
struct ice_aqc_get_clear_fw_log {
@@ -1716,7 +1810,7 @@ struct ice_aqc_get_pkg_info {
/* Get Package Info List response buffer format (0x0C43) */
struct ice_aqc_get_pkg_info_resp {
__le32 count;
- struct ice_aqc_get_pkg_info pkg_info[1];
+ struct ice_aqc_get_pkg_info pkg_info[];
};
/* Lan Queue Overflow Event (direct, 0x1001) */
@@ -1775,6 +1869,8 @@ struct ice_aq_desc {
struct ice_aqc_rl_profile rl_profile;
struct ice_aqc_nvm nvm;
struct ice_aqc_nvm_checksum nvm_checksum;
+ struct ice_aqc_nvm_pkg_data pkg_data;
+ struct ice_aqc_nvm_pass_comp_tbl pass_comp_tbl;
struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_lldp_get_mib lldp_get_mib;
struct ice_aqc_lldp_set_mib_change lldp_set_event;
@@ -1797,6 +1893,7 @@ struct ice_aq_desc {
struct ice_aqc_set_event_mask set_event_mask;
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
+ struct ice_aqc_get_link_topo get_link_topo;
} params;
};
@@ -1896,12 +1993,19 @@ enum ice_adminq_opc {
ice_aqc_opc_get_link_status = 0x0607,
ice_aqc_opc_set_event_mask = 0x0613,
ice_aqc_opc_set_mac_lb = 0x0620,
+ ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_set_port_id_led = 0x06E9,
ice_aqc_opc_sff_eeprom = 0x06EE,
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
+ ice_aqc_opc_nvm_erase = 0x0702,
+ ice_aqc_opc_nvm_write = 0x0703,
ice_aqc_opc_nvm_checksum = 0x0706,
+ ice_aqc_opc_nvm_write_activate = 0x0707,
+ ice_aqc_opc_nvm_update_empr = 0x0709,
+ ice_aqc_opc_nvm_pkg_data = 0x070A,
+ ice_aqc_opc_nvm_pass_component_tbl = 0x070B,
/* PF/VF mailbox commands */
ice_mbx_opc_send_msg_to_pf = 0x0801,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index d620d26d42ed..87008476d8fe 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -635,10 +635,10 @@ int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
+ u8 buf_len = struct_size(qg_buf, txqs, 1);
struct ice_tlan_ctx tlan_ctx = { 0 };
struct ice_aqc_add_txqs_perq *txq;
struct ice_pf *pf = vsi->back;
- u8 buf_len = sizeof(*qg_buf);
struct ice_hw *hw = &pf->hw;
enum ice_status status;
u16 pf_q;
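
The ice_adminq_cmd.h hunks above convert single-element trailing arrays such as txqs[1], teid[1] and elem[1] into C99 flexible array members, and ice_base.c correspondingly sizes its buffer with struct_size(), which evaluates to sizeof(*ptr) plus n times the element size with overflow checking. A standalone sketch of the idiom; the macro below approximates struct_size() without the kernel's overflow guards:

/* Sketch of the flexible-array-member sizing shown above; the kernel's
 * struct_size() additionally guards against arithmetic overflow. */
#include <stdio.h>
#include <stdlib.h>

struct txq_grp {
        unsigned int parent_teid;
        unsigned char num_txqs;
        unsigned short txqs[];            /* flexible array, was txqs[1] */
};

/* Simplified stand-in for the kernel's struct_size() helper. */
#define SKETCH_STRUCT_SIZE(ptr, member, n) \
        (sizeof(*(ptr)) + (size_t)(n) * sizeof(*(ptr)->member))

int main(void)
{
        struct txq_grp *grp;
        size_t len = SKETCH_STRUCT_SIZE(grp, txqs, 4);

        grp = calloc(1, len);
        if (!grp)
                return 1;
        grp->num_txqs = 4;
        printf("allocated %zu bytes for %d queues\n", len, grp->num_txqs);
        free(grp);
        return 0;
}
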
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index bce0e1281168..34abfcea9858 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -20,7 +20,40 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
return ICE_ERR_DEVICE_NOT_SUPPORTED;
- hw->mac_type = ICE_MAC_GENERIC;
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_BACKPLANE:
+ case ICE_DEV_ID_E810C_QSFP:
+ case ICE_DEV_ID_E810C_SFP:
+ case ICE_DEV_ID_E810_XXV_SFP:
+ hw->mac_type = ICE_MAC_E810;
+ break;
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_SGMII:
+ case ICE_DEV_ID_E822C_10G_BASE_T:
+ case ICE_DEV_ID_E822C_BACKPLANE:
+ case ICE_DEV_ID_E822C_QSFP:
+ case ICE_DEV_ID_E822C_SFP:
+ case ICE_DEV_ID_E822C_SGMII:
+ case ICE_DEV_ID_E822L_10G_BASE_T:
+ case ICE_DEV_ID_E822L_BACKPLANE:
+ case ICE_DEV_ID_E822L_SFP:
+ case ICE_DEV_ID_E822L_SGMII:
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823L_SFP:
+ hw->mac_type = ICE_MAC_GENERIC;
+ break;
+ default:
+ hw->mac_type = ICE_MAC_UNKNOWN;
+ break;
+ }
+
+ ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
return 0;
}
@@ -52,7 +85,8 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
* is returned in user specified buffer. Please interpret user specified
* buffer as "manage_mac_read" response.
* Response such as various MAC addresses are stored in HW struct (port.mac)
- * ice_aq_discover_caps is expected to be called before this function is called.
+ * ice_discover_dev_caps is expected to be called before this function is
+ * called.
*/
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
@@ -116,11 +150,13 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
u16 pcaps_size = sizeof(*pcaps);
struct ice_aq_desc desc;
enum ice_status status;
+ struct ice_hw *hw;
cmd = &desc.params.get_phy;
if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
return ICE_ERR_PARAM;
+ hw = pi->hw;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
@@ -128,17 +164,94 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
cmd->param0 |= cpu_to_le16(report_mode);
- status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
+ status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
+
+ ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
+ report_mode);
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
+ ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
+ ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
+ pcaps->low_power_ctrl_an);
+ ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
+ ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
+ pcaps->eeer_value);
+ ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
+ pcaps->link_fec_options);
+ ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
+ pcaps->module_compliance_enforcement);
+ ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
+ pcaps->extended_compliance_code);
+ ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
+ pcaps->module_type[0]);
+ ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
+ pcaps->module_type[1]);
+ ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
+ pcaps->module_type[2]);
if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
+ memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
+ sizeof(pi->phy.link_info.module_type));
}
return status;
}
/**
+ * ice_aq_get_link_topo_handle - get link topology node return status
+ * @pi: port information structure
+ * @node_type: requested node type
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get link topology node return status for specified node type (0x06E0)
+ *
+ * Node type cage can be used to determine if a cage is present. If the AQC
+ * returns an error (ENOENT), then no cage is present. If no cage is present,
+ * the connection type is backplane or BASE-T.
+ */
+static enum ice_status
+ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.get_link_topo;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+
+ cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
+ ICE_AQC_LINK_TOPO_NODE_CTX_S);
+
+ /* set node type */
+ cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
+
+ return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_is_media_cage_present
+ * @pi: port information structure
+ *
+ * Returns true if a media cage is present, else false. If there is no cage,
+ * the media type is backplane or BASE-T.
+ */
+static bool ice_is_media_cage_present(struct ice_port_info *pi)
+{
+ /* Node type cage can be used to determine if a cage is present. If the AQC
+ * returns an error (ENOENT), then no cage is present. If no cage is present,
+ * the connection type is backplane or BASE-T.
+ */
+ return !ice_aq_get_link_topo_handle(pi,
+ ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
+ NULL);
+}
+
+/**
* ice_get_media_type - Gets media type
* @pi: port information structure
*/
@@ -155,6 +268,18 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
return ICE_MEDIA_UNKNOWN;
if (hw_link_info->phy_type_low) {
+ /* 1G SGMII is a special case where some DA cable PHYs
+ * may show this as an option when it really shouldn't
+ * be, since SGMII is meant to be between a MAC and a PHY
+ * in a backplane. Try to detect this case and handle it.
+ */
+ if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
+ (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
+ ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+ hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
+ ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+ return ICE_MEDIA_DA;
+
switch (hw_link_info->phy_type_low) {
case ICE_PHY_TYPE_LOW_1000BASE_SX:
case ICE_PHY_TYPE_LOW_1000BASE_LX:
@@ -163,7 +288,6 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
case ICE_PHY_TYPE_LOW_25GBASE_SR:
case ICE_PHY_TYPE_LOW_25GBASE_LR:
- case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
case ICE_PHY_TYPE_LOW_40GBASE_SR4:
case ICE_PHY_TYPE_LOW_40GBASE_LR4:
case ICE_PHY_TYPE_LOW_50GBASE_SR2:
@@ -175,6 +299,14 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_100GBASE_LR4:
case ICE_PHY_TYPE_LOW_100GBASE_SR2:
case ICE_PHY_TYPE_LOW_100GBASE_DR:
+ case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
return ICE_MEDIA_FIBER;
case ICE_PHY_TYPE_LOW_100BASE_TX:
case ICE_PHY_TYPE_LOW_1000BASE_T:
@@ -194,6 +326,16 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
case ICE_PHY_TYPE_LOW_100GBASE_CP2:
return ICE_MEDIA_DA;
+ case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2:
+ case ICE_PHY_TYPE_LOW_50G_AUI2:
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
+ case ICE_PHY_TYPE_LOW_100G_AUI4:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4:
+ if (ice_is_media_cage_present(pi))
+ return ICE_MEDIA_DA;
+ fallthrough;
case ICE_PHY_TYPE_LOW_1000BASE_KX:
case ICE_PHY_TYPE_LOW_2500BASE_KX:
case ICE_PHY_TYPE_LOW_2500BASE_X:
@@ -211,8 +353,16 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
}
} else {
switch (hw_link_info->phy_type_high) {
+ case ICE_PHY_TYPE_HIGH_100G_AUI2:
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2:
+ if (ice_is_media_cage_present(pi))
+ return ICE_MEDIA_DA;
+ fallthrough;
case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
return ICE_MEDIA_BACKPLANE;
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
+ return ICE_MEDIA_FIBER;
}
}
return ICE_MEDIA_UNKNOWN;
@@ -292,18 +442,21 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
- ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
- ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+ ice_debug(hw, ICE_DBG_LINK, "get link info\n");
+ ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
(unsigned long long)li->phy_type_low);
- ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
(unsigned long long)li->phy_type_high);
- ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
- ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
- ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
- ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
- ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
- ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
- ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);
+ ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
+ ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
+ ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
+ ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
+ ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
+ ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
+ ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
+ li->max_frame_size);
+ ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
/* save link status information */
if (link)
@@ -440,32 +593,24 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), sw);
}
-#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \
- (((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
-#define ICE_FW_LOG_DESC_SIZE_MAX \
- ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
-
/**
* ice_get_fw_log_cfg - get FW logging configuration
* @hw: pointer to the HW struct
*/
static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
{
- struct ice_aqc_fw_logging_data *config;
struct ice_aq_desc desc;
enum ice_status status;
+ __le16 *config;
u16 size;
- size = ICE_FW_LOG_DESC_SIZE_MAX;
+ size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
if (!config)
return ICE_ERR_NO_MEMORY;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
if (!status) {
u16 i;
@@ -474,7 +619,7 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
u16 v, m, flgs;
- v = le16_to_cpu(config->entry[i]);
+ v = le16_to_cpu(config[i]);
m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
@@ -526,11 +671,11 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
*/
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
- struct ice_aqc_fw_logging_data *data = NULL;
struct ice_aqc_fw_logging *cmd;
enum ice_status status = 0;
u16 i, chgs = 0, len = 0;
struct ice_aq_desc desc;
+ __le16 *data = NULL;
u8 actv_evnts = 0;
void *buf = NULL;
@@ -571,8 +716,9 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
continue;
if (!data) {
- data = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_FW_LOG_DESC_SIZE_MAX,
+ data = devm_kcalloc(ice_hw_to_dev(hw),
+ sizeof(*data),
+ ICE_AQC_FW_LOG_ID_MAX,
GFP_KERNEL);
if (!data)
return ICE_ERR_NO_MEMORY;
@@ -580,7 +726,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
val = i << ICE_AQC_FW_LOG_ID_S;
val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
- data->entry[chgs++] = cpu_to_le16(val);
+ data[chgs++] = cpu_to_le16(val);
}
/* Only enable FW logging if at least one module is specified.
@@ -599,7 +745,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
buf = data;
- len = ICE_FW_LOG_DESC_SIZE(chgs);
+ len = sizeof(*data) * chgs;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
}
}
@@ -629,7 +775,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
continue;
}
- v = le16_to_cpu(data->entry[i]);
+ v = le16_to_cpu(data[i]);
m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
}
@@ -881,23 +1027,23 @@ void ice_deinit_hw(struct ice_hw *hw)
*/
enum ice_status ice_check_reset(struct ice_hw *hw)
{
- u32 cnt, reg = 0, grst_delay, uld_mask;
+ u32 cnt, reg = 0, grst_timeout, uld_mask;
/* Poll for Device Active state in case a recent CORER, GLOBR,
* or EMPR has occurred. The grst delay value is in 100ms units.
* Add 1sec for outstanding AQ commands that can take a long time.
*/
- grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
- GLGEN_RSTCTL_GRSTDEL_S) + 10;
+ grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
+ GLGEN_RSTCTL_GRSTDEL_S) + 10;
- for (cnt = 0; cnt < grst_delay; cnt++) {
+ for (cnt = 0; cnt < grst_timeout; cnt++) {
mdelay(100);
reg = rd32(hw, GLGEN_RSTAT);
if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
break;
}
- if (cnt == grst_delay) {
+ if (cnt == grst_timeout) {
ice_debug(hw, ICE_DBG_INIT,
"Global reset polling failed to complete.\n");
return ICE_ERR_RESET_FAILED;
@@ -1541,7 +1687,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
enum ice_status status;
u16 buf_len;
- buf_len = struct_size(buf, elem, num - 1);
+ buf_len = struct_size(buf, elem, num);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
@@ -1558,7 +1704,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
if (status)
goto ice_alloc_res_exit;
- memcpy(res, buf->elem, sizeof(buf->elem) * num);
+ memcpy(res, buf->elem, sizeof(*buf->elem) * num);
ice_alloc_res_exit:
kfree(buf);
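For context on the struct_size()/sizeof changes in this hunk and the next: they assume the
trailing array of struct ice_aqc_alloc_free_res_elem is now a C99 flexible array member rather
than a one-element array, so sizeof(*buf) no longer includes an element and
struct_size(buf, elem, num) already covers all num entries, making the old "num - 1"
correction unnecessary. A minimal sketch with hypothetical names (not driver structs):

	/* Illustration only; "demo" types are hypothetical. */
	struct demo_elem { __le16 val; };
	struct demo_buf {
		__le16 num_elems;
		__le16 res_type;
		struct demo_elem elem[];	/* flexible array member */
	};

	struct demo_buf *buf;
	u16 num = 8;
	size_t buf_len;

	/* struct_size() from <linux/overflow.h>:
	 *   struct_size(buf, elem, num) == sizeof(*buf) + num * sizeof(*buf->elem),
	 * with overflow checking, so no "- 1" adjustment is needed.
	 */
	buf_len = struct_size(buf, elem, num);
	buf = kzalloc(buf_len, GFP_KERNEL);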
@@ -1572,14 +1718,13 @@ ice_alloc_res_exit:
* @num: number of resources
* @res: pointer to array that contains the resources to free
*/
-enum ice_status
-ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
enum ice_status status;
u16 buf_len;
- buf_len = struct_size(buf, elem, num - 1);
+ buf_len = struct_size(buf, elem, num);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
@@ -1587,7 +1732,7 @@ ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
/* Prepare buffer to free resource. */
buf->num_elems = cpu_to_le16(num);
buf->res_type = cpu_to_le16(type);
- memcpy(buf->elem, res, sizeof(buf->elem) * num);
+ memcpy(buf->elem, res, sizeof(*buf->elem) * num);
status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
ice_aqc_opc_free_res, NULL);
@@ -1622,221 +1767,431 @@ static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
}
/**
- * ice_parse_caps - parse function/device capabilities
+ * ice_parse_common_caps - parse common device/function capabilities
* @hw: pointer to the HW struct
- * @buf: pointer to a buffer containing function/device capability records
- * @cap_count: number of capability records in the list
- * @opc: type of capabilities list to parse
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
+ *
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Returns: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */
+static bool
+ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
+ struct ice_aqc_list_caps_elem *elem, const char *prefix)
+{
+ u32 logical_id = le32_to_cpu(elem->logical_id);
+ u32 phys_id = le32_to_cpu(elem->phys_id);
+ u32 number = le32_to_cpu(elem->number);
+ u16 cap = le16_to_cpu(elem->cap);
+ bool found = true;
+
+ switch (cap) {
+ case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: valid_functions (bitmap) = %d\n", prefix,
+ caps->valid_functions);
+ break;
+ case ICE_AQC_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: sr_iov_1_1 = %d\n", prefix,
+ caps->sr_iov_1_1);
+ break;
+ case ICE_AQC_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: dcb = %d\n", prefix, caps->dcb);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: active_tc_bitmap = %d\n", prefix,
+ caps->active_tc_bitmap);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: maxtc = %d\n", prefix, caps->maxtc);
+ break;
+ case ICE_AQC_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: rss_table_size = %d\n", prefix,
+ caps->rss_table_size);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: rss_table_entry_width = %d\n", prefix,
+ caps->rss_table_entry_width);
+ break;
+ case ICE_AQC_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_rxq = %d\n", prefix,
+ caps->num_rxq);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: rxq_first_id = %d\n", prefix,
+ caps->rxq_first_id);
+ break;
+ case ICE_AQC_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_txq = %d\n", prefix,
+ caps->num_txq);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: txq_first_id = %d\n", prefix,
+ caps->txq_first_id);
+ break;
+ case ICE_AQC_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_msix_vectors = %d\n", prefix,
+ caps->num_msix_vectors);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: msix_vector_first_id = %d\n", prefix,
+ caps->msix_vector_first_id);
+ break;
+ case ICE_AQC_CAPS_PENDING_NVM_VER:
+ caps->nvm_update_pending_nvm = true;
+ ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
+ break;
+ case ICE_AQC_CAPS_PENDING_OROM_VER:
+ caps->nvm_update_pending_orom = true;
+ ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
+ break;
+ case ICE_AQC_CAPS_PENDING_NET_VER:
+ caps->nvm_update_pending_netlist = true;
+ ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
+ break;
+ case ICE_AQC_CAPS_NVM_MGMT:
+ caps->nvm_unified_update =
+ (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
+ true : false;
+ ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
+ caps->nvm_unified_update);
+ break;
+ case ICE_AQC_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
+ prefix, caps->max_mtu);
+ break;
+ default:
+ /* Not one of the recognized common capabilities */
+ found = false;
+ }
+
+ return found;
+}
+
+/**
+ * ice_recalc_port_limited_caps - Recalculate port limited capabilities
+ * @hw: pointer to the HW structure
+ * @caps: pointer to capabilities structure to fix
+ *
+ * Re-calculate the capabilities that are dependent on the number of physical
+ * ports; i.e. some features are not supported or function differently on
+ * devices with more than 4 ports.
+ */
+static void
+ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
+{
+ /* This assumes device capabilities are always scanned before function
+ * capabilities during the initialization flow.
+ */
+ if (hw->dev_caps.num_funcs > 4) {
+ /* Max 4 TCs per port */
+ caps->maxtc = 4;
+ ice_debug(hw, ICE_DBG_INIT,
+ "reducing maxtc to %d (based on #ports)\n",
+ caps->maxtc);
+ }
+}
+
+/**
+ * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for ICE_AQC_CAPS_VF.
+ */
+static void
+ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ u32 logical_id = le32_to_cpu(cap->logical_id);
+ u32 number = le32_to_cpu(cap->number);
+
+ func_p->num_allocd_vfs = number;
+ func_p->vf_base_id = logical_id;
+ ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
+ func_p->num_allocd_vfs);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
+ func_p->vf_base_id);
+}
+
+/**
+ * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for ICE_AQC_CAPS_VSI.
+ */
+static void
+ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
+ le32_to_cpu(cap->number));
+ ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
+ func_p->guar_num_vsi);
+}
+
+/**
+ * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ *
+ * Extract function capabilities for ICE_AQC_CAPS_FD.
+ */
+static void
+ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
+{
+ u32 reg_val, val;
+
+ reg_val = rd32(hw, GLQF_FD_SIZE);
+ val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
+ GLQF_FD_SIZE_FD_GSIZE_S;
+ func_p->fd_fltr_guar =
+ ice_get_num_per_func(hw, val);
+ val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
+ GLQF_FD_SIZE_FD_BSIZE_S;
+ func_p->fd_fltr_best_effort = val;
+
+ ice_debug(hw, ICE_DBG_INIT,
+ "func caps: fd_fltr_guar = %d\n",
+ func_p->fd_fltr_guar);
+ ice_debug(hw, ICE_DBG_INIT,
+ "func caps: fd_fltr_best_effort = %d\n",
+ func_p->fd_fltr_best_effort);
+}
+
+/**
+ * ice_parse_func_caps - Parse function capabilities
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ice_parse_common_caps.
*
- * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structure.
*/
static void
-ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
- enum ice_adminq_opc opc)
+ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
+ void *buf, u32 cap_count)
{
struct ice_aqc_list_caps_elem *cap_resp;
- struct ice_hw_func_caps *func_p = NULL;
- struct ice_hw_dev_caps *dev_p = NULL;
- struct ice_hw_common_caps *caps;
- char const *prefix;
u32 i;
- if (!buf)
- return;
-
cap_resp = (struct ice_aqc_list_caps_elem *)buf;
- if (opc == ice_aqc_opc_list_dev_caps) {
- dev_p = &hw->dev_caps;
- caps = &dev_p->common_cap;
- prefix = "dev cap";
- } else if (opc == ice_aqc_opc_list_func_caps) {
- func_p = &hw->func_caps;
- caps = &func_p->common_cap;
- prefix = "func cap";
- } else {
- ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
- return;
- }
+ memset(func_p, 0, sizeof(*func_p));
- for (i = 0; caps && i < cap_count; i++, cap_resp++) {
- u32 logical_id = le32_to_cpu(cap_resp->logical_id);
- u32 phys_id = le32_to_cpu(cap_resp->phys_id);
- u32 number = le32_to_cpu(cap_resp->number);
- u16 cap = le16_to_cpu(cap_resp->cap);
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
+ bool found;
- switch (cap) {
- case ICE_AQC_CAPS_VALID_FUNCTIONS:
- caps->valid_functions = number;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: valid_functions (bitmap) = %d\n", prefix,
- caps->valid_functions);
+ found = ice_parse_common_caps(hw, &func_p->common_cap,
+ &cap_resp[i], "func caps");
- /* store func count for resource management purposes */
- if (dev_p)
- dev_p->num_funcs = hweight32(number);
- break;
- case ICE_AQC_CAPS_SRIOV:
- caps->sr_iov_1_1 = (number == 1);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: sr_iov_1_1 = %d\n", prefix,
- caps->sr_iov_1_1);
- break;
+ switch (cap) {
case ICE_AQC_CAPS_VF:
- if (dev_p) {
- dev_p->num_vfs_exposed = number;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_vfs_exposed = %d\n", prefix,
- dev_p->num_vfs_exposed);
- } else if (func_p) {
- func_p->num_allocd_vfs = number;
- func_p->vf_base_id = logical_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_allocd_vfs = %d\n", prefix,
- func_p->num_allocd_vfs);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: vf_base_id = %d\n", prefix,
- func_p->vf_base_id);
- }
+ ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
break;
case ICE_AQC_CAPS_VSI:
- if (dev_p) {
- dev_p->num_vsi_allocd_to_host = number;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_vsi_allocd_to_host = %d\n",
- prefix,
- dev_p->num_vsi_allocd_to_host);
- } else if (func_p) {
- func_p->guar_num_vsi =
- ice_get_num_per_func(hw, ICE_MAX_VSI);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: guar_num_vsi (fw) = %d\n",
- prefix, number);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: guar_num_vsi = %d\n",
- prefix, func_p->guar_num_vsi);
- }
+ ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_DCB:
- caps->dcb = (number == 1);
- caps->active_tc_bitmap = logical_id;
- caps->maxtc = phys_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: dcb = %d\n", prefix, caps->dcb);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: active_tc_bitmap = %d\n", prefix,
- caps->active_tc_bitmap);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: maxtc = %d\n", prefix, caps->maxtc);
- break;
- case ICE_AQC_CAPS_RSS:
- caps->rss_table_size = number;
- caps->rss_table_entry_width = logical_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: rss_table_size = %d\n", prefix,
- caps->rss_table_size);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: rss_table_entry_width = %d\n", prefix,
- caps->rss_table_entry_width);
+ case ICE_AQC_CAPS_FD:
+ ice_parse_fdir_func_caps(hw, func_p);
break;
- case ICE_AQC_CAPS_RXQS:
- caps->num_rxq = number;
- caps->rxq_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_rxq = %d\n", prefix,
- caps->num_rxq);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: rxq_first_id = %d\n", prefix,
- caps->rxq_first_id);
+ default:
+ /* Don't list common capabilities as unknown */
+ if (!found)
+ ice_debug(hw, ICE_DBG_INIT,
+ "func caps: unknown capability[%d]: 0x%x\n",
+ i, cap);
break;
- case ICE_AQC_CAPS_TXQS:
- caps->num_txq = number;
- caps->txq_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_txq = %d\n", prefix,
- caps->num_txq);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: txq_first_id = %d\n", prefix,
- caps->txq_first_id);
+ }
+ }
+
+ ice_recalc_port_limited_caps(hw, &func_p->common_cap);
+}
+
+/**
+ * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
+ */
+static void
+ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ u32 number = le32_to_cpu(cap->number);
+
+ dev_p->num_funcs = hweight32(number);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
+ dev_p->num_funcs);
+}
+
+/**
+ * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_VF for device capabilities.
+ */
+static void
+ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ u32 number = le32_to_cpu(cap->number);
+
+ dev_p->num_vfs_exposed = number;
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vfs_exposed = %d\n",
+ dev_p->num_vfs_exposed);
+}
+
+/**
+ * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_VSI for device capabilities.
+ */
+static void
+ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ u32 number = le32_to_cpu(cap->number);
+
+ dev_p->num_vsi_allocd_to_host = number;
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
+ dev_p->num_vsi_allocd_to_host);
+}
+
+/**
+ * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_FD for device capabilities.
+ */
+static void
+ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ u32 number = le32_to_cpu(cap->number);
+
+ dev_p->num_flow_director_fltr = number;
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
+ dev_p->num_flow_director_fltr);
+}
+
+/**
+ * ice_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ice_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structure.
+ */
+static void
+ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ void *buf, u32 cap_count)
+{
+ struct ice_aqc_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ice_aqc_list_caps_elem *)buf;
+
+ memset(dev_p, 0, sizeof(*dev_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
+ bool found;
+
+ found = ice_parse_common_caps(hw, &dev_p->common_cap,
+ &cap_resp[i], "dev caps");
+
+ switch (cap) {
+ case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_MSIX:
- caps->num_msix_vectors = number;
- caps->msix_vector_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_msix_vectors = %d\n", prefix,
- caps->num_msix_vectors);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: msix_vector_first_id = %d\n", prefix,
- caps->msix_vector_first_id);
+ case ICE_AQC_CAPS_VF:
+ ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_FD:
- if (dev_p) {
- dev_p->num_flow_director_fltr = number;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_flow_director_fltr = %d\n",
- prefix,
- dev_p->num_flow_director_fltr);
- }
- if (func_p) {
- u32 reg_val, val;
-
- reg_val = rd32(hw, GLQF_FD_SIZE);
- val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
- GLQF_FD_SIZE_FD_GSIZE_S;
- func_p->fd_fltr_guar =
- ice_get_num_per_func(hw, val);
- val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
- GLQF_FD_SIZE_FD_BSIZE_S;
- func_p->fd_fltr_best_effort = val;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: fd_fltr_guar = %d\n",
- prefix, func_p->fd_fltr_guar);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: fd_fltr_best_effort = %d\n",
- prefix, func_p->fd_fltr_best_effort);
- }
+ case ICE_AQC_CAPS_VSI:
+ ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_MAX_MTU:
- caps->max_mtu = number;
- ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
- prefix, caps->max_mtu);
+ case ICE_AQC_CAPS_FD:
+ ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
break;
default:
- ice_debug(hw, ICE_DBG_INIT,
- "%s: unknown capability[%d]: 0x%x\n", prefix,
- i, cap);
+ /* Don't list common capabilities as unknown */
+ if (!found)
+ ice_debug(hw, ICE_DBG_INIT,
+ "dev caps: unknown capability[%d]: 0x%x\n",
+ i, cap);
break;
}
}
- /* Re-calculate capabilities that are dependent on the number of
- * physical ports; i.e. some features are not supported or function
- * differently on devices with more than 4 ports.
- */
- if (hw->dev_caps.num_funcs > 4) {
- /* Max 4 TCs per port */
- caps->maxtc = 4;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: maxtc = %d (based on #ports)\n", prefix,
- caps->maxtc);
- }
+ ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
}
/**
- * ice_aq_discover_caps - query function/device capabilities
+ * ice_aq_list_caps - query function/device capabilities
* @hw: pointer to the HW struct
- * @buf: a virtual buffer to hold the capabilities
- * @buf_size: Size of the virtual buffer
- * @cap_count: cap count needed if AQ err==ENOMEM
- * @opc: capabilities type to discover - pass in the command opcode
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
* @cd: pointer to command details structure or NULL
*
- * Get the function(0x000a)/device(0x000b) capabilities description from
- * the firmware.
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return ICE_AQ_RC_ENOMEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
+ * firmware could return) to avoid this.
*/
-static enum ice_status
-ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
- enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+enum ice_status
+ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aqc_list_caps *cmd;
struct ice_aq_desc desc;
@@ -1849,59 +2204,78 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
-
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
- if (!status)
- ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
- else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
+
+ if (cap_count)
*cap_count = le32_to_cpu(cmd->count);
+
return status;
}
/**
- * ice_discover_caps - get info about the HW
+ * ice_discover_dev_caps - Read and extract device capabilities
* @hw: pointer to the hardware structure
- * @opc: capabilities type to discover - pass in the command opcode
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
*/
-static enum ice_status
-ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
+enum ice_status
+ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
enum ice_status status;
- u32 cap_count;
- u16 cbuf_len;
- u8 retries;
-
- /* The driver doesn't know how many capabilities the device will return
- * so the buffer size required isn't known ahead of time. The driver
- * starts with cbuf_len and if this turns out to be insufficient, the
- * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
- * The driver then allocates the buffer based on the count and retries
- * the operation. So it follows that the retry count is 2.
+ u32 cap_count = 0;
+ void *cbuf;
+
+ cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!cbuf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
*/
-#define ICE_GET_CAP_BUF_COUNT 40
-#define ICE_GET_CAP_RETRY_COUNT 2
+ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
- cap_count = ICE_GET_CAP_BUF_COUNT;
- retries = ICE_GET_CAP_RETRY_COUNT;
+ status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
+ ice_aqc_opc_list_dev_caps, NULL);
+ if (!status)
+ ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+ kfree(cbuf);
- do {
- void *cbuf;
+ return status;
+}
- cbuf_len = (u16)(cap_count *
- sizeof(struct ice_aqc_list_caps_elem));
- cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
- if (!cbuf)
- return ICE_ERR_NO_MEMORY;
+/**
+ * ice_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ */
+static enum ice_status
+ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
+{
+ enum ice_status status;
+ u32 cap_count = 0;
+ void *cbuf;
- status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
- opc, NULL);
- devm_kfree(ice_hw_to_dev(hw), cbuf);
+ cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!cbuf)
+ return ICE_ERR_NO_MEMORY;
- if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
- break;
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
- /* If ENOMEM is returned, try again with bigger buffer */
- } while (--retries);
+ status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
+ ice_aqc_opc_list_func_caps, NULL);
+ if (!status)
+ ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
+ kfree(cbuf);
return status;
}
@@ -1978,11 +2352,11 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
{
enum ice_status status;
- status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
- if (!status)
- status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
+ status = ice_discover_dev_caps(hw, &hw->dev_caps);
+ if (status)
+ return status;
- return status;
+ return ice_discover_func_caps(hw, &hw->func_caps);
}
/**
@@ -2218,7 +2592,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
/**
* ice_aq_set_phy_cfg
* @hw: pointer to the HW struct
- * @lport: logical port number
+ * @pi: port info structure of the logical port of interest
* @cfg: structure with PHY configuration data to be set
* @cd: pointer to command details structure or NULL
*
@@ -2228,7 +2602,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
* parameters. This status will be indicated by the command response (0x0601).
*/
enum ice_status
-ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
+ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
@@ -2247,24 +2621,29 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
}
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
- desc.params.set_phy.lport_num = lport;
+ desc.params.set_phy.lport_num = pi->lport;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+ ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
(unsigned long long)le64_to_cpu(cfg->phy_type_low));
- ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
(unsigned long long)le64_to_cpu(cfg->phy_type_high));
- ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
- ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n",
- cfg->low_power_ctrl);
- ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
- ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
- ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
+ ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
+ ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
+ cfg->low_power_ctrl_an);
+ ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
+ ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
+ ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
+ cfg->link_fec_opt);
status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
status = 0;
+ if (!status)
+ pi->phy.curr_user_phy_cfg = *cfg;
+
return status;
}
@@ -2298,9 +2677,6 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
pcaps, NULL);
- if (!status)
- memcpy(li->module_type, &pcaps->module_type,
- sizeof(li->module_type));
devm_kfree(ice_hw_to_dev(hw), pcaps);
}
@@ -2309,28 +2685,101 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
}
/**
- * ice_set_fc
+ * ice_cache_phy_user_req
* @pi: port information structure
- * @aq_failures: pointer to status code, specific to ice_set_fc routine
- * @ena_auto_link_update: enable automatic link update
+ * @cache_data: PHY setting data to cache
+ * @cache_mode: which PHY setting is being cached
*
- * Set the requested flow control mode.
+ * Cache the user-requested settings (FC, FEC, speed) for later use.
+ */
+static void
+ice_cache_phy_user_req(struct ice_port_info *pi,
+ struct ice_phy_cache_mode_data cache_data,
+ enum ice_phy_cache_mode cache_mode)
+{
+ if (!pi)
+ return;
+
+ switch (cache_mode) {
+ case ICE_FC_MODE:
+ pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
+ break;
+ case ICE_SPEED_MODE:
+ pi->phy.curr_user_speed_req =
+ cache_data.data.curr_user_speed_req;
+ break;
+ case ICE_FEC_MODE:
+ pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_caps_to_fc_mode
+ * @caps: PHY capabilities
+ *
+ * Convert PHY FC capabilities to ice FC mode
+ */
+enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
+{
+ if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
+ caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
+ return ICE_FC_FULL;
+
+ if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
+ return ICE_FC_TX_PAUSE;
+
+ if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
+ return ICE_FC_RX_PAUSE;
+
+ return ICE_FC_NONE;
+}
+
+/**
+ * ice_caps_to_fec_mode
+ * @caps: PHY capabilities
+ * @fec_options: Link FEC options
+ *
+ * Convert PHY FEC capabilities to ice FEC mode
+ */
+enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
+{
+ if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
+ return ICE_FEC_AUTO;
+
+ if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
+ ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
+ ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
+ ICE_AQC_PHY_FEC_25G_KR_REQ))
+ return ICE_FEC_BASER;
+
+ if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
+ ICE_AQC_PHY_FEC_25G_RS_544_REQ |
+ ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
+ return ICE_FEC_RS;
+
+ return ICE_FEC_NONE;
+}
+
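A brief usage sketch for the two converters above: a caller that has already filled pcaps via
ice_aq_get_phy_caps() (variable names here are illustrative, not from this patch) can derive
the current modes with:

	enum ice_fec_mode fec;
	enum ice_fc_mode fc;

	/* caps/link_fec_options come straight from the get PHY caps response */
	fc = ice_caps_to_fc_mode(pcaps->caps);
	fec = ice_caps_to_fec_mode(pcaps->caps, pcaps->link_fec_options);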
+/**
+ * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
+ * @pi: port information structure
+ * @cfg: PHY configuration data to set FC mode
+ * @req_mode: FC mode to configure
*/
enum ice_status
-ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
+ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+ enum ice_fc_mode req_mode)
{
- struct ice_aqc_set_phy_cfg_data cfg = { 0 };
- struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
+ struct ice_phy_cache_mode_data cache_data;
u8 pause_mask = 0x0;
- struct ice_hw *hw;
- if (!pi)
- return ICE_ERR_PARAM;
- hw = pi->hw;
- *aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
+ if (!pi || !cfg)
+ return ICE_ERR_BAD_PTR;
- switch (pi->fc.req_mode) {
+ switch (req_mode) {
case ICE_FC_FULL:
pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
@@ -2345,6 +2794,42 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
break;
}
+ /* clear the old pause settings */
+ cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
+ ICE_AQC_PHY_EN_RX_LINK_PAUSE);
+
+ /* set the new capabilities */
+ cfg->caps |= pause_mask;
+
+ /* Cache user FC request */
+ cache_data.data.curr_user_fc_req = req_mode;
+ ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
+
+ return 0;
+}
+
+/**
+ * ice_set_fc
+ * @pi: port information structure
+ * @aq_failures: pointer to status code, specific to ice_set_fc routine
+ * @ena_auto_link_update: enable automatic link update
+ *
+ * Set the requested flow control mode.
+ */
+enum ice_status
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
+{
+ struct ice_aqc_set_phy_cfg_data cfg = { 0 };
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ enum ice_status status;
+ struct ice_hw *hw;
+
+ if (!pi || !aq_failures)
+ return ICE_ERR_BAD_PTR;
+
+ *aq_failures = 0;
+ hw = pi->hw;
+
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return ICE_ERR_NO_MEMORY;
@@ -2357,12 +2842,12 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
goto out;
}
- /* clear the old pause settings */
- cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
- ICE_AQC_PHY_EN_RX_LINK_PAUSE);
+ ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
- /* set the new capabilities */
- cfg.caps |= pause_mask;
+ /* Configure the set PHY data */
+ status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
+ if (status)
+ goto out;
/* If the capabilities have changed, then set the new config */
if (cfg.caps != pcaps->caps) {
@@ -2371,15 +2856,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
/* Auto restart link so settings take effect */
if (ena_auto_link_update)
cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
- /* Copy over all the old settings */
- cfg.phy_type_high = pcaps->phy_type_high;
- cfg.phy_type_low = pcaps->phy_type_low;
- cfg.low_power_ctrl = pcaps->low_power_ctrl;
- cfg.eee_cap = pcaps->eee_cap;
- cfg.eeer_value = pcaps->eeer_value;
- cfg.link_fec_opt = pcaps->link_fec_options;
-
- status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
+
+ status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
if (status) {
*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
goto out;
@@ -2409,7 +2887,44 @@ out:
}
/**
+ * ice_phy_caps_equals_cfg
+ * @phy_caps: PHY capabilities
+ * @phy_cfg: PHY configuration
+ *
+ * Helper function to determine if PHY capabilities match PHY
+ * configuration
+ */
+bool
+ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
+ struct ice_aqc_set_phy_cfg_data *phy_cfg)
+{
+ u8 caps_mask, cfg_mask;
+
+ if (!phy_caps || !phy_cfg)
+ return false;
+
+ /* These bits are not common between capabilities and configuration.
+ * Do not use them to determine equality.
+ */
+ caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
+ ICE_AQC_GET_PHY_EN_MOD_QUAL);
+ cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+
+ if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
+ phy_caps->phy_type_high != phy_cfg->phy_type_high ||
+ ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
+ phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
+ phy_caps->eee_cap != phy_cfg->eee_cap ||
+ phy_caps->eeer_value != phy_cfg->eeer_value ||
+ phy_caps->link_fec_options != phy_cfg->link_fec_opt)
+ return false;
+
+ return true;
+}
+
+/**
* ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @pi: port information structure
* @caps: PHY ability structure to copy data from
* @cfg: PHY configuration structure to copy data to
*
@@ -2417,42 +2932,73 @@ out:
* data structure
*/
void
-ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
+ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
+ struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg)
{
- if (!caps || !cfg)
+ if (!pi || !caps || !cfg)
return;
+ memset(cfg, 0, sizeof(*cfg));
cfg->phy_type_low = caps->phy_type_low;
cfg->phy_type_high = caps->phy_type_high;
cfg->caps = caps->caps;
- cfg->low_power_ctrl = caps->low_power_ctrl;
+ cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
cfg->eee_cap = caps->eee_cap;
cfg->eeer_value = caps->eeer_value;
cfg->link_fec_opt = caps->link_fec_options;
+ cfg->module_compliance_enforcement =
+ caps->module_compliance_enforcement;
+
+ if (ice_fw_supports_link_override(pi->hw)) {
+ struct ice_link_default_override_tlv tlv;
+
+ if (ice_get_link_default_override(&tlv, pi))
+ return;
+
+ if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
+ cfg->module_compliance_enforcement |=
+ ICE_LINK_OVERRIDE_STRICT_MODE;
+ }
}
/**
* ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
+ * @pi: port information structure
* @cfg: PHY configuration data to set FEC mode
* @fec: FEC mode to configure
- *
- * Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC
- * (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps
- * ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling.
*/
-void
-ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
+enum ice_status
+ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+ enum ice_fec_mode fec)
{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ enum ice_status status;
+
+ if (!pi || !cfg)
+ return ICE_ERR_BAD_PTR;
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
+ NULL);
+ if (status)
+ goto out;
+
+ cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
+ cfg->link_fec_opt = pcaps->link_fec_options;
+
switch (fec) {
case ICE_FEC_BASER:
/* Clear RS bits, and AND BASE-R ability
* bits and OR request bits.
*/
cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
- ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
+ ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
- ICE_AQC_PHY_FEC_25G_KR_REQ;
+ ICE_AQC_PHY_FEC_25G_KR_REQ;
break;
case ICE_FEC_RS:
/* Clear BASE-R bits, and AND RS ability
@@ -2460,7 +3006,7 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
*/
cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
- ICE_AQC_PHY_FEC_25G_RS_544_REQ;
+ ICE_AQC_PHY_FEC_25G_RS_544_REQ;
break;
case ICE_FEC_NONE:
/* Clear all FEC option bits. */
@@ -2469,8 +3015,28 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
case ICE_FEC_AUTO:
/* AND auto FEC bit, and all caps bits. */
cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
+ cfg->link_fec_opt |= pcaps->link_fec_options;
break;
+ default:
+ status = ICE_ERR_PARAM;
+ break;
+ }
+
+ if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
+ struct ice_link_default_override_tlv tlv;
+
+ if (ice_get_link_default_override(&tlv, pi))
+ goto out;
+
+ if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
+ (tlv.options & ICE_LINK_OVERRIDE_EN))
+ cfg->link_fec_opt = tlv.fec_options;
}
+
+out:
+ kfree(pcaps);
+
+ return status;
}
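As a sketch of how the reworked FEC helper is meant to be driven (caller flow and error
handling are illustrative, not taken from this patch): copy the reported abilities into a
config, request the FEC mode, and only push the config to firmware if it actually differs
from the capabilities:

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	status = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
	if (!status && !ice_phy_caps_equals_cfg(pcaps, &cfg))
		status = ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);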
/**
@@ -2889,10 +3455,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
struct ice_sq_cd *cd)
{
- u16 i, sum_header_size, sum_q_size = 0;
struct ice_aqc_add_tx_qgrp *list;
struct ice_aqc_add_txqs *cmd;
struct ice_aq_desc desc;
+ u16 i, sum_size = 0;
cmd = &desc.params.add_txqs;
@@ -2904,18 +3470,13 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
return ICE_ERR_PARAM;
- sum_header_size = num_qgrps *
- (sizeof(*qg_list) - sizeof(*qg_list->txqs));
-
- list = qg_list;
- for (i = 0; i < num_qgrps; i++) {
- struct ice_aqc_add_txqs_perq *q = list->txqs;
-
- sum_q_size += list->num_txqs * sizeof(*q);
- list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
+ for (i = 0, list = qg_list; i < num_qgrps; i++) {
+ sum_size += struct_size(list, txqs, list->num_txqs);
+ list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
+ list->num_txqs);
}
- if (buf_size != (sum_header_size + sum_q_size))
+ if (buf_size != sum_size)
return ICE_ERR_PARAM;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@@ -2943,6 +3504,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
+ struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
struct ice_aq_desc desc;
enum ice_status status;
@@ -2992,16 +3554,16 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
*/
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- for (i = 0; i < num_qgrps; ++i) {
- /* Calculate the size taken up by the queue IDs in this group */
- sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
-
- /* Add the size of the group header */
- sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
+ for (i = 0, item = qg_list; i < num_qgrps; i++) {
+ u16 item_size = struct_size(item, q_id, item->num_qs);
/* If the num of queues is even, add 2 bytes of padding */
- if ((qg_list[i].num_qs % 2) == 0)
- sz += 2;
+ if ((item->num_qs % 2) == 0)
+ item_size += 2;
+
+ sz += item_size;
+
+ item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
}
if (buf_size != sz)
@@ -3342,7 +3904,18 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
* Without setting the generic section as valid in valid_sections, the
* Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
*/
- buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
+ buf->txqs[0].info.valid_sections =
+ ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
+ ICE_AQC_ELEM_VALID_EIR;
+ buf->txqs[0].info.generic = 0;
+ buf->txqs[0].info.cir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->txqs[0].info.cir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
+ buf->txqs[0].info.eir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->txqs[0].info.eir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
/* add the LAN queue */
status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
@@ -3390,24 +3963,32 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
struct ice_sq_cd *cd)
{
enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
- struct ice_aqc_dis_txq_item qg_list;
+ struct ice_aqc_dis_txq_item *qg_list;
struct ice_q_ctx *q_ctx;
- u16 i;
+ struct ice_hw *hw;
+ u16 i, buf_size;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
+ hw = pi->hw;
+
if (!num_queues) {
/* if queue is disabled already yet the disable queue command
* has to be sent to complete the VF reset, then call
* ice_aq_dis_lan_txq without any queue information
*/
if (rst_src)
- return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
+ return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
vmvf_num, NULL);
return ICE_ERR_CFG;
}
+ buf_size = struct_size(qg_list, q_id, 1);
+ qg_list = kzalloc(buf_size, GFP_KERNEL);
+ if (!qg_list)
+ return ICE_ERR_NO_MEMORY;
+
mutex_lock(&pi->sched_lock);
for (i = 0; i < num_queues; i++) {
@@ -3416,23 +3997,22 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
if (!node)
continue;
- q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
if (!q_ctx) {
- ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
+ ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
q_handles[i]);
continue;
}
if (q_ctx->q_handle != q_handles[i]) {
- ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
+ ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
q_ctx->q_handle, q_handles[i]);
continue;
}
- qg_list.parent_teid = node->info.parent_teid;
- qg_list.num_qs = 1;
- qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
- status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
- sizeof(qg_list), rst_src, vmvf_num,
- cd);
+ qg_list->parent_teid = node->info.parent_teid;
+ qg_list->num_qs = 1;
+ qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
+ status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
+ vmvf_num, cd);
if (status)
break;
@@ -3440,6 +4020,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
}
mutex_unlock(&pi->sched_lock);
+ kfree(qg_list);
return status;
}
@@ -3652,17 +4233,168 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
*/
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
- struct ice_aqc_get_elem *buf)
+ struct ice_aqc_txsched_elem_data *buf)
{
u16 buf_size, num_elem_ret = 0;
enum ice_status status;
buf_size = sizeof(*buf);
memset(buf, 0, buf_size);
- buf->generic[0].node_teid = cpu_to_le32(node_teid);
+ buf->node_teid = cpu_to_le32(node_teid);
status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
NULL);
if (status || num_elem_ret != 1)
ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
return status;
}
+
+/**
+ * ice_fw_supports_link_override
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the firmware supports link override
+ */
+bool ice_fw_supports_link_override(struct ice_hw *hw)
+{
+ /* Currently, only supported for E810 devices */
+ if (hw->mac_type != ICE_MAC_E810)
+ return false;
+
+ if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
+ if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
+ return true;
+ if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
+ hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
+ return true;
+ } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
+ return true;
+ }
+
+ return false;
+}
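The version check above (after the E810 gate) is the usual "API version at least
MAJ.MIN.PATCH" comparison; expressed as a generic helper (a sketch, not part of the driver),
it is equivalent to:

	static bool api_ver_at_least(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
	{
		/* compare major, then minor, then patch */
		if (hw->api_maj_ver != maj)
			return hw->api_maj_ver > maj;
		if (hw->api_min_ver != min)
			return hw->api_min_ver > min;
		return hw->api_patch >= patch;
	}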
+
+/**
+ * ice_get_link_default_override
+ * @ldo: pointer to the link default override struct
+ * @pi: pointer to the port info struct
+ *
+ * Gets the link default override for a port
+ */
+enum ice_status
+ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
+ struct ice_port_info *pi)
+{
+ u16 i, tlv, tlv_len, tlv_start, buf, offset;
+ struct ice_hw *hw = pi->hw;
+ enum ice_status status;
+
+ status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
+ ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read link override TLV.\n");
+ return status;
+ }
+
+ /* Each port has its own config; calculate for our port */
+ tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
+ ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
+
+ /* link options first */
+ status = ice_read_sr_word(hw, tlv_start, &buf);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read override link options.\n");
+ return status;
+ }
+ ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
+ ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
+ ICE_LINK_OVERRIDE_PHY_CFG_S;
+
+ /* link PHY config */
+ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
+ status = ice_read_sr_word(hw, offset, &buf);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read override phy config.\n");
+ return status;
+ }
+ ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
+
+ /* PHY types low */
+ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
+ for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
+ status = ice_read_sr_word(hw, (offset + i), &buf);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read override link options.\n");
+ return status;
+ }
+ /* shift 16 bits at a time to fill 64 bits */
+ ldo->phy_type_low |= ((u64)buf << (i * 16));
+ }
+
+ /* PHY types high */
+ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
+ ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
+ for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
+ status = ice_read_sr_word(hw, (offset + i), &buf);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read override link options.\n");
+ return status;
+ }
+ /* shift 16 bits at a time to fill 64 bits */
+ ldo->phy_type_high |= ((u64)buf << (i * 16));
+ }
+
+ return status;
+}
+
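To make the "shift 16 bits at a time" loops above concrete: assuming
ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS is 64/16 = 4, four consecutive shadow-RAM words w0..w3 are
packed lowest-word-first into the 64-bit PHY type mask, i.e.:

	/* w0 fills bits 15:0, w1 bits 31:16, and so on */
	phy_type = (u64)w0 | ((u64)w1 << 16) | ((u64)w2 << 32) | ((u64)w3 << 48);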
+/**
+ * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
+ * @caps: get PHY capability data
+ */
+bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
+{
+ if (caps->caps & ICE_AQC_PHY_AN_MODE ||
+ caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
+ ICE_AQC_PHY_AN_EN_CLAUSE73 |
+ ICE_AQC_PHY_AN_EN_CLAUSE37))
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the HW struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buf: pointer to the caller-supplied buffer holding the MIB block to set
+ * @buf_size: size of the buffer (in bytes)
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB. (0x0A08)
+ */
+enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_set_local_mib *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_set_mib;
+
+ if (buf_size == 0 || !buf)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
+
+ desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
+ desc.datalen = cpu_to_le16(buf_size);
+
+ cmd->type = mib_type;
+ cmd->length = cpu_to_le16(buf_size);
+
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 9b9e50d2398b..3ebb973878c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -11,8 +11,6 @@
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
-
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
@@ -87,6 +85,11 @@ enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+enum ice_status
+ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps);
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap);
@@ -95,17 +98,33 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
enum ice_status
-ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
+ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
+bool ice_fw_supports_link_override(struct ice_hw *hw);
+enum ice_status
+ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
+ struct ice_port_info *pi);
+bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
+
+enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
+enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
bool ena_auto_link_update);
+enum ice_status
+ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+ enum ice_fc_mode fc);
+bool
+ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_aqc_set_phy_cfg_data *cfg);
void
-ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec);
-void
-ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
+ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
+ struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg);
enum ice_status
+ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+ enum ice_fec_mode fec);
+enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
@@ -152,5 +171,8 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
- struct ice_aqc_get_elem *buf);
+ struct ice_aqc_txsched_elem_data *buf);
+enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 1e18021aa073..1f46a7828be8 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -312,9 +312,10 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
do { \
- int i; \
/* free descriptors */ \
- if ((qi)->ring.r.ring##_bi) \
+ if ((qi)->ring.r.ring##_bi) { \
+ int i; \
+ \
for (i = 0; i < (qi)->num_##ring##_entries; i++) \
if ((qi)->ring.r.ring##_bi[i].pa) { \
dmam_free_coherent(ice_hw_to_dev(hw), \
@@ -325,6 +326,7 @@ do { \
(qi)->ring.r.ring##_bi[i].pa = 0;\
(qi)->ring.r.ring##_bi[i].size = 0;\
} \
+ } \
/* free the buffer info list */ \
if ((qi)->ring.cmd_buf) \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index adb8dab765c8..2a3147ee0bbb 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -135,39 +135,6 @@ ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
}
/**
- * ice_aq_set_lldp_mib - Set the LLDP MIB
- * @hw: pointer to the HW struct
- * @mib_type: Local, Remote or both Local and Remote MIBs
- * @buf: pointer to the caller-supplied buffer to store the MIB block
- * @buf_size: size of the buffer (in bytes)
- * @cd: pointer to command details structure or NULL
- *
- * Set the LLDP MIB. (0x0A08)
- */
-static enum ice_status
-ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
- struct ice_sq_cd *cd)
-{
- struct ice_aqc_lldp_set_local_mib *cmd;
- struct ice_aq_desc desc;
-
- cmd = &desc.params.lldp_set_mib;
-
- if (buf_size == 0 || !buf)
- return ICE_ERR_PARAM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
-
- desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
- desc.datalen = cpu_to_le16(buf_size);
-
- cmd->type = mib_type;
- cmd->length = cpu_to_le16(buf_size);
-
- return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
-}
-
-/**
* ice_get_dcbx_status
* @hw: pointer to the HW struct
*
@@ -1362,7 +1329,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf)
{
struct ice_sched_node *node, *tc_node;
- struct ice_aqc_get_elem elem;
+ struct ice_aqc_txsched_elem_data elem;
enum ice_status status = 0;
u32 teid1, teid2;
u8 i, j;
@@ -1404,7 +1371,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
/* new TC */
status = ice_sched_query_elem(pi->hw, teid2, &elem);
if (!status)
- status = ice_sched_add_node(pi, 1, &elem.generic[0]);
+ status = ice_sched_add_node(pi, 1, &elem);
if (status)
break;
/* update the TC number */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index ee138f9bdc7c..d7e5e6178a21 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -87,7 +87,7 @@
struct ice_lldp_org_tlv {
__be16 typelen;
__be32 ouisubtype;
- u8 tlvinfo[1];
+ u8 tlvinfo[];
} __packed;
struct ice_cee_tlv_hdr {
@@ -109,7 +109,7 @@ struct ice_cee_feat_tlv {
#define ICE_CEE_FEAT_TLV_WILLING_M 0x40
#define ICE_CEE_FEAT_TLV_ERR_M 0x20
u8 subtype;
- u8 tlvinfo[1];
+ u8 tlvinfo[];
};
struct ice_cee_app_prio {
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 979af197f8a3..36abd6b7280c 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -444,10 +444,6 @@ void ice_dcb_rebuild(struct ice_pf *pf)
goto dcb_error;
}
- /* If DCB was not enabled previously, we are done */
- if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
- return;
-
mutex_lock(&pf->tc_mutex);
if (!pf->hw.port_info->is_sw_lldp)
@@ -467,7 +463,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
}
}
- dev_info(dev, "DCB restored after reset\n");
+ dev_info(dev, "DCB info restored\n");
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(dev, "Query Port ETS failed\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 323238669572..35c21d9ae009 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -53,6 +53,12 @@ ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
{
tlan_ctx->cgd_num = ring->dcb_tc;
}
+
+static inline bool ice_is_dcb_active(struct ice_pf *pf)
+{
+ return (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) ||
+ test_bit(ICE_FLAG_DCB_ENA, pf->flags));
+}
#else
#define ice_dcb_rebuild(pf) do {} while (0)
@@ -95,6 +101,11 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
return 0;
}
+static inline bool ice_is_dcb_active(struct ice_pf __always_unused *pf)
+{
+ return false;
+}
+
static inline bool
ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
unsigned int __always_unused txqueue)
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index a73d06e06b5d..111d6bfe4222 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -4,6 +4,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_devlink.h"
+#include "ice_fw_update.h"
static int ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
{
@@ -229,8 +230,61 @@ static int ice_devlink_info_get(struct devlink *devlink,
return 0;
}
+/**
+ * ice_devlink_flash_update - Update firmware stored in flash on the device
+ * @devlink: pointer to devlink associated with device to update
+ * @path: the path of the firmware file to use via request_firmware
+ * @component: name of the component to update, or NULL
+ * @extack: netlink extended ACK structure
+ *
+ * Perform a device flash update. The bulk of the update logic is contained
+ * within the ice_flash_pldm_image function.
+ *
+ * Returns: zero on success, or an error code on failure.
+ */
+static int
+ice_devlink_flash_update(struct devlink *devlink, const char *path,
+ const char *component, struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = &pf->pdev->dev;
+ struct ice_hw *hw = &pf->hw;
+ const struct firmware *fw;
+ int err;
+
+ /* individual component update is not yet supported */
+ if (component)
+ return -EOPNOTSUPP;
+
+ if (!hw->dev_caps.common_cap.nvm_unified_update) {
+ NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
+ return -EOPNOTSUPP;
+ }
+
+ err = ice_check_for_pending_update(pf, component, extack);
+ if (err)
+ return err;
+
+ err = request_firmware(&fw, path, dev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to read file from disk");
+ return err;
+ }
+
+ devlink_flash_update_begin_notify(devlink);
+ devlink_flash_update_status_notify(devlink, "Preparing to flash",
+ component, 0, 0);
+ err = ice_flash_pldm_image(pf, fw, extack);
+ devlink_flash_update_end_notify(devlink);
+
+ release_firmware(fw);
+
+ return err;
+}
+
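From user space this callback is reached via the devlink flash interface, e.g. (device address and file name are illustrative):

	devlink dev flash pci/0000:3b:00.0 file ice/fw_image.bin

The image must be resolvable by request_firmware(), i.e. placed under the usual firmware search path (typically /lib/firmware).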
static const struct devlink_ops ice_devlink_ops = {
.info_get = ice_devlink_info_get,
+ .flash_update = ice_devlink_flash_update,
};
static void ice_devlink_free(void *devlink_ptr)
@@ -303,7 +357,7 @@ void ice_devlink_unregister(struct ice_pf *pf)
*
* Create and register a devlink_port for this PF. Note that although each
* physical function is connected to a separate devlink instance, the port
- * will still be numbered according to the physical function id.
+ * will still be numbered according to the physical function ID.
*
* Return: zero on success or an error code on failure.
*/
@@ -312,6 +366,7 @@ int ice_devlink_create_port(struct ice_pf *pf)
struct devlink *devlink = priv_to_devlink(pf);
struct ice_vsi *vsi = ice_get_main_vsi(pf);
struct device *dev = ice_pf_to_dev(pf);
+ struct devlink_port_attrs attrs = {};
int err;
if (!vsi) {
@@ -319,8 +374,9 @@ int ice_devlink_create_port(struct ice_pf *pf)
return -EIO;
}
- devlink_port_attrs_set(&pf->devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
- pf->hw.pf_id, false, 0, NULL, 0);
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = pf->hw.pf_id;
+ devlink_port_attrs_set(&pf->devlink_port, &attrs);
err = devlink_port_register(devlink, &pf->devlink_port, pf->hw.pf_id);
if (err) {
dev_err(dev, "devlink_port_register failed: %d\n", err);
@@ -397,12 +453,60 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
return 0;
}
+/**
+ * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
+ * @devlink: the devlink instance
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
+ * the device-caps devlink region. It captures a snapshot of the device
+ * capabilities reported by firmware.
+ *
+ * Returns: zero on success, and updates the data pointer. Returns a non-zero
+ * error code on failure.
+ */
+static int
+ice_devlink_devcaps_snapshot(struct devlink *devlink,
+ struct netlink_ext_ack *extack, u8 **data)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ void *devcaps;
+
+ devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN);
+ if (!devcaps)
+ return -ENOMEM;
+
+ status = ice_aq_list_caps(hw, devcaps, ICE_AQ_MAX_BUF_LEN, NULL,
+ ice_aqc_opc_list_dev_caps, NULL);
+ if (status) {
+ dev_dbg(dev, "ice_aq_list_caps: failed to read device capabilities, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
+ vfree(devcaps);
+ return -EIO;
+ }
+
+ *data = (u8 *)devcaps;
+
+ return 0;
+}
+
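The resulting snapshot is retrieved with the standard devlink region commands, e.g. (device address and snapshot ID are illustrative):

	devlink region new pci/0000:3b:00.0/device-caps snapshot 1
	devlink region dump pci/0000:3b:00.0/device-caps snapshot 1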
static const struct devlink_region_ops ice_nvm_region_ops = {
.name = "nvm-flash",
.destructor = vfree,
.snapshot = ice_devlink_nvm_snapshot,
};
+static const struct devlink_region_ops ice_devcaps_region_ops = {
+ .name = "device-caps",
+ .destructor = vfree,
+ .snapshot = ice_devlink_devcaps_snapshot,
+};
+
/**
* ice_devlink_init_regions - Initialize devlink regions
* @pf: the PF device structure
@@ -424,6 +528,15 @@ void ice_devlink_init_regions(struct ice_pf *pf)
PTR_ERR(pf->nvm_region));
pf->nvm_region = NULL;
}
+
+ pf->devcaps_region = devlink_region_create(devlink,
+ &ice_devcaps_region_ops, 10,
+ ICE_AQ_MAX_BUF_LEN);
+ if (IS_ERR(pf->devcaps_region)) {
+ dev_err(dev, "failed to create device-caps devlink region, err %ld\n",
+ PTR_ERR(pf->devcaps_region));
+ pf->devcaps_region = NULL;
+ }
}
/**
@@ -436,4 +549,6 @@ void ice_devlink_destroy_regions(struct ice_pf *pf)
{
if (pf->nvm_region)
devlink_region_destroy(pf->nvm_region);
+ if (pf->devcaps_region)
+ devlink_region_destroy(pf->devcaps_region);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 68c38004a088..9e8e9531cd87 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -59,8 +59,11 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
+ ICE_VSI_STAT("rx_gro_dropped", rx_gro_dropped),
ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
ICE_VSI_STAT("tx_linearize", tx_linearize),
+ ICE_VSI_STAT("tx_busy", tx_busy),
+ ICE_VSI_STAT("tx_restart", tx_restart),
};
enum ice_ethtool_test_id {
@@ -100,6 +103,7 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast),
ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast),
ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors),
+ ICE_PF_STAT("tx_timeout.nic", tx_timeout_count),
ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64),
ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64),
ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127),
@@ -179,7 +183,6 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
orom = &nvm->orom;
strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strscpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
/* Display NVM version (from which the firmware version can be
* determined) which contains more pertinent information.
@@ -967,12 +970,8 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_aqc_set_phy_cfg_data config = { 0 };
- struct ice_aqc_get_phy_caps_data *caps;
struct ice_vsi *vsi = np->vsi;
- u8 sw_cfg_caps, sw_cfg_fec;
struct ice_port_info *pi;
- enum ice_status status;
- int err = 0;
pi = vsi->port_info;
if (!pi)
@@ -984,54 +983,26 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
return -EOPNOTSUPP;
}
- /* Get last SW configuration */
- caps = kzalloc(sizeof(*caps), GFP_KERNEL);
- if (!caps)
- return -ENOMEM;
-
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
- caps, NULL);
- if (status) {
- err = -EAGAIN;
- goto done;
- }
-
- /* Copy SW configuration returned from PHY caps to PHY config */
- ice_copy_phy_caps_to_cfg(caps, &config);
- sw_cfg_caps = caps->caps;
- sw_cfg_fec = caps->link_fec_options;
-
- /* Get toloplogy caps, then copy PHY FEC topoloy caps to PHY config */
- memset(caps, 0, sizeof(*caps));
-
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
- caps, NULL);
- if (status) {
- err = -EAGAIN;
- goto done;
- }
-
- config.caps |= (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
- config.link_fec_opt = caps->link_fec_options;
+ /* Proceed only if requesting different FEC mode */
+ if (pi->phy.curr_user_fec_req == req_fec)
+ return 0;
- ice_cfg_phy_fec(&config, req_fec);
+ /* Copy the current user PHY configuration. The current user PHY
+ * configuration is initialized during probe from PHY capabilities
+ * software mode, and updated on set PHY configuration.
+ */
+ memcpy(&config, &pi->phy.curr_user_phy_cfg, sizeof(config));
- /* If FEC mode has changed, then set PHY configuration and enable AN. */
- if ((config.caps & ICE_AQ_PHY_ENA_AUTO_FEC) !=
- (sw_cfg_caps & ICE_AQC_PHY_EN_AUTO_FEC) ||
- config.link_fec_opt != sw_cfg_fec) {
- if (caps->caps & ICE_AQC_PHY_AN_MODE)
- config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ ice_cfg_phy_fec(pi, &config, req_fec);
+ config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
- status = ice_aq_set_phy_cfg(pi->hw, pi->lport, &config, NULL);
+ if (ice_aq_set_phy_cfg(pi->hw, pi, &config, NULL))
+ return -EAGAIN;
- if (status)
- err = -EAGAIN;
- }
+ /* Save requested FEC config */
+ pi->phy.curr_user_fec_req = req_fec;
-done:
- kfree(caps);
- return err;
+ return 0;
}
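This reworked path is what ethtool's FEC control ends up calling, e.g. (interface name illustrative):

	ethtool --set-fec eth0 encoding rs

and it now becomes a no-op when the requested mode already matches curr_user_fec_req.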
/**
@@ -1229,6 +1200,17 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);
+ /* Do not allow change to link-down-on-close when Total Port Shutdown
+ * is enabled.
+ */
+ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, change_flags) &&
+ test_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) {
+ dev_err(dev, "Setting link-down-on-close not supported on this port\n");
+ set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
+ ret = -EINVAL;
+ goto ethtool_exit;
+ }
+
if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) {
if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) {
enum ice_status status;
@@ -1316,6 +1298,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
ret = -EAGAIN;
}
+ethtool_exit:
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
return ret;
}
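With this check in place, toggling the private flag from user space, e.g. (interface name illustrative):

	ethtool --set-priv-flags eth0 link-down-on-close on

is rejected while total port shutdown is enabled on the port.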
@@ -1420,6 +1403,77 @@ ice_get_ethtool_stats(struct net_device *netdev,
}
}
+#define ICE_PHY_TYPE_LOW_MASK_MIN_1G (ICE_PHY_TYPE_LOW_100BASE_TX | \
+ ICE_PHY_TYPE_LOW_100M_SGMII)
+
+#define ICE_PHY_TYPE_LOW_MASK_MIN_25G (ICE_PHY_TYPE_LOW_MASK_MIN_1G | \
+ ICE_PHY_TYPE_LOW_1000BASE_T | \
+ ICE_PHY_TYPE_LOW_1000BASE_SX | \
+ ICE_PHY_TYPE_LOW_1000BASE_LX | \
+ ICE_PHY_TYPE_LOW_1000BASE_KX | \
+ ICE_PHY_TYPE_LOW_1G_SGMII | \
+ ICE_PHY_TYPE_LOW_2500BASE_T | \
+ ICE_PHY_TYPE_LOW_2500BASE_X | \
+ ICE_PHY_TYPE_LOW_2500BASE_KX | \
+ ICE_PHY_TYPE_LOW_5GBASE_T | \
+ ICE_PHY_TYPE_LOW_5GBASE_KR | \
+ ICE_PHY_TYPE_LOW_10GBASE_T | \
+ ICE_PHY_TYPE_LOW_10G_SFI_DA | \
+ ICE_PHY_TYPE_LOW_10GBASE_SR | \
+ ICE_PHY_TYPE_LOW_10GBASE_LR | \
+ ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \
+ ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \
+ ICE_PHY_TYPE_LOW_10G_SFI_C2C)
+
+#define ICE_PHY_TYPE_LOW_MASK_100G (ICE_PHY_TYPE_LOW_100GBASE_CR4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_SR4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_LR4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_KR4 | \
+ ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \
+ ICE_PHY_TYPE_LOW_100G_CAUI4 | \
+ ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \
+ ICE_PHY_TYPE_LOW_100G_AUI4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_CP2 | \
+ ICE_PHY_TYPE_LOW_100GBASE_SR2 | \
+ ICE_PHY_TYPE_LOW_100GBASE_DR)
+
+#define ICE_PHY_TYPE_HIGH_MASK_100G (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \
+ ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |\
+ ICE_PHY_TYPE_HIGH_100G_CAUI2 | \
+ ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \
+ ICE_PHY_TYPE_HIGH_100G_AUI2)
+
+/**
+ * ice_mask_min_supported_speeds
+ * @phy_types_high: PHY type high
+ * @phy_types_low: PHY type low to apply minimum supported speeds mask
+ *
+ * Apply minimum supported speeds mask to PHY type low. The result is the set
+ * of speeds reported as ethtool supported link modes.
+ */
+static
+void ice_mask_min_supported_speeds(u64 phy_types_high, u64 *phy_types_low)
+{
+ /* if QSFP connection with 100G speed, minimum supported speed is 25G */
+ if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G ||
+ phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G)
+ *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G;
+ else
+ *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G;
+}
+
+#define ice_ethtool_advertise_link_mode(aq_link_speed, ethtool_link_mode) \
+ do { \
+ if (req_speeds & (aq_link_speed) || \
+ (!req_speeds && \
+ (adv_phy_type_lo & phy_type_mask_lo || \
+ adv_phy_type_hi & phy_type_mask_hi))) \
+ ethtool_link_ksettings_add_link_mode(ks, advertising,\
+ ethtool_link_mode); \
+ } while (0)
+
/**
* ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes
* @netdev: network interface device structure
@@ -1430,277 +1484,312 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_link_status *hw_link_info;
- bool need_add_adv_mode = false;
struct ice_vsi *vsi = np->vsi;
- u64 phy_types_high;
- u64 phy_types_low;
+ struct ice_pf *pf = vsi->back;
+ u64 phy_type_mask_lo = 0;
+ u64 phy_type_mask_hi = 0;
+ u64 adv_phy_type_lo = 0;
+ u64 adv_phy_type_hi = 0;
+ u64 phy_types_high = 0;
+ u64 phy_types_low = 0;
+ u16 req_speeds;
+
+ req_speeds = vsi->port_info->phy.link_info.req_speeds;
+
+ /* Check if lenient mode is supported and enabled, or in strict mode.
+ *
+ * In lenient mode the Supported link modes are the PHY types without
+ * media. The Advertising link mode is either 1. the user requested
+ * speed, 2. the override PHY mask, or 3. the PHY types with media.
+ *
+ * In strict mode Supported link mode are the PHY type with media,
+ * and Advertising link modes are the media PHY type or the speed
+ * requested by user.
+ */
+ if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) {
+ struct ice_link_default_override_tlv *ldo;
- hw_link_info = &vsi->port_info->phy.link_info;
- phy_types_low = vsi->port_info->phy.phy_type_low;
- phy_types_high = vsi->port_info->phy.phy_type_high;
+ ldo = &pf->link_dflt_override;
+ phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo);
+ phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi);
+
+ ice_mask_min_supported_speeds(phy_types_high, &phy_types_low);
+
+ /* If override enabled and PHY mask set, then
+ * Advertising link mode is the intersection of the PHY
+ * types without media and the override PHY mask.
+ */
+ if (ldo->options & ICE_LINK_OVERRIDE_EN &&
+ (ldo->phy_type_low || ldo->phy_type_high)) {
+ adv_phy_type_lo =
+ le64_to_cpu(pf->nvm_phy_type_lo) &
+ ldo->phy_type_low;
+ adv_phy_type_hi =
+ le64_to_cpu(pf->nvm_phy_type_hi) &
+ ldo->phy_type_high;
+ }
+ } else {
+ phy_types_low = vsi->port_info->phy.phy_type_low;
+ phy_types_high = vsi->port_info->phy.phy_type_high;
+ }
+
+ /* If Advertising link mode PHY type is not using override PHY type,
+ * then use PHY type with media.
+ */
+ if (!adv_phy_type_lo && !adv_phy_type_hi) {
+ adv_phy_type_lo = vsi->port_info->phy.phy_type_low;
+ adv_phy_type_hi = vsi->port_info->phy.phy_type_high;
+ }
ethtool_link_ksettings_zero_link_mode(ks, supported);
ethtool_link_ksettings_zero_link_mode(ks, advertising);
- if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
- phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) {
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_100BASE_TX |
+ ICE_PHY_TYPE_LOW_100M_SGMII;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100baseT_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100baseT_Full);
+
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100MB,
+ 100baseT_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_T |
+ ICE_PHY_TYPE_LOW_1G_SGMII;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseT_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 1000baseT_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB,
+ 1000baseT_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_KX;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseKX_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 1000baseKX_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB,
+ 1000baseKX_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_SX ||
- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_SX |
+ ICE_PHY_TYPE_LOW_1000BASE_LX;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseX_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 1000baseX_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB,
+ 1000baseX_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_T;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
2500baseT_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 2500baseT_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB,
+ 2500baseT_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_X ||
- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_X |
+ ICE_PHY_TYPE_LOW_2500BASE_KX;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
2500baseX_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 2500baseX_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB,
+ 2500baseX_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_5GBASE_T |
+ ICE_PHY_TYPE_LOW_5GBASE_KR;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
5000baseT_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 5000baseT_Full);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_DA ||
- phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
- phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) {
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_5GB,
+ 5000baseT_Full);
+ }
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_T |
+ ICE_PHY_TYPE_LOW_10G_SFI_DA |
+ ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC |
+ ICE_PHY_TYPE_LOW_10G_SFI_C2C;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseT_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseT_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB,
+ 10000baseT_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_KR_CR1;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseKR_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseKR_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB,
+ 10000baseKR_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_SR;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseSR_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB,
+ 10000baseSR_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_LR;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseLR_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseLR_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB,
+ 10000baseLR_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
- phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC ||
- phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_T |
+ ICE_PHY_TYPE_LOW_25GBASE_CR |
+ ICE_PHY_TYPE_LOW_25GBASE_CR_S |
+ ICE_PHY_TYPE_LOW_25GBASE_CR1 |
+ ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC |
+ ICE_PHY_TYPE_LOW_25G_AUI_C2C;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseCR_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 25000baseCR_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB,
+ 25000baseCR_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_SR ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_LR) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_SR |
+ ICE_PHY_TYPE_LOW_25GBASE_LR;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseSR_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 25000baseSR_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB,
+ 25000baseSR_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_KR |
+ ICE_PHY_TYPE_LOW_25GBASE_KR_S |
+ ICE_PHY_TYPE_LOW_25GBASE_KR1;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseKR_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 25000baseKR_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB,
+ 25000baseKR_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_KR4;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseKR4_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseKR4_Full);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC ||
- phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) {
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB,
+ 40000baseKR4_Full);
+ }
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_CR4 |
+ ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC |
+ ICE_PHY_TYPE_LOW_40G_XLAUI;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseCR4_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseCR4_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB,
+ 40000baseCR4_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_SR4;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseSR4_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseSR4_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB,
+ 40000baseSR4_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_LR4;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseLR4_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseLR4_Full);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC ||
- phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC ||
- phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR ||
- phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC ||
- phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1) {
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB,
+ 40000baseLR4_Full);
+ }
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_CR2 |
+ ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC |
+ ICE_PHY_TYPE_LOW_50G_LAUI2 |
+ ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC |
+ ICE_PHY_TYPE_LOW_50G_AUI2 |
+ ICE_PHY_TYPE_LOW_50GBASE_CP |
+ ICE_PHY_TYPE_LOW_50GBASE_SR |
+ ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC |
+ ICE_PHY_TYPE_LOW_50G_AUI1;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseCR2_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 50000baseCR2_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB,
+ 50000baseCR2_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_KR2 |
+ ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseKR2_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 50000baseKR2_Full);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_FR ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR) {
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB,
+ 50000baseKR2_Full);
+ }
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_SR2 |
+ ICE_PHY_TYPE_LOW_50GBASE_LR2 |
+ ICE_PHY_TYPE_LOW_50GBASE_FR |
+ ICE_PHY_TYPE_LOW_50GBASE_LR;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseSR2_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 50000baseSR2_Full);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC ||
- phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC ||
- phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2 ||
- phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC ||
- phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2 ||
- phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC ||
- phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2) {
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB,
+ 50000baseSR2_Full);
+ }
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_CR4 |
+ ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC |
+ ICE_PHY_TYPE_LOW_100G_CAUI4 |
+ ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC |
+ ICE_PHY_TYPE_LOW_100G_AUI4 |
+ ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 |
+ ICE_PHY_TYPE_LOW_100GBASE_CP2;
+ phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |
+ ICE_PHY_TYPE_HIGH_100G_CAUI2 |
+ ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC |
+ ICE_PHY_TYPE_HIGH_100G_AUI2;
+ if (phy_types_low & phy_type_mask_lo ||
+ phy_types_high & phy_type_mask_hi) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseCR4_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
- need_add_adv_mode = true;
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseCR4_Full);
}
- if (need_add_adv_mode) {
- need_add_adv_mode = false;
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseCR4_Full);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_SR4 |
+ ICE_PHY_TYPE_LOW_100GBASE_SR2;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseSR4_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
- need_add_adv_mode = true;
- }
- if (need_add_adv_mode) {
- need_add_adv_mode = false;
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseSR4_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseSR4_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_LR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_DR) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_LR4 |
+ ICE_PHY_TYPE_LOW_100GBASE_DR;
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseLR4_ER4_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
- need_add_adv_mode = true;
- }
- if (need_add_adv_mode) {
- need_add_adv_mode = false;
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseLR4_ER4_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseLR4_ER4_Full);
}
- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
- phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) {
+
+ phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_KR4 |
+ ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4;
+ phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4;
+ if (phy_types_low & phy_type_mask_lo ||
+ phy_types_high & phy_type_mask_hi) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseKR4_Full);
- if (!hw_link_info->req_speeds ||
- hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
- need_add_adv_mode = true;
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseKR4_Full);
}
- if (need_add_adv_mode)
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseKR4_Full);
/* Autoneg PHY types */
if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
@@ -2128,18 +2217,18 @@ static int
ice_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *ks)
{
- u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0;
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ethtool_link_ksettings safe_ks, copy_ks;
struct ice_aqc_get_phy_caps_data *abilities;
+ u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT;
u16 adv_link_speed, curr_link_speed, idx;
struct ice_aqc_set_phy_cfg_data config;
struct ice_pf *pf = np->vsi->back;
struct ice_port_info *p;
u8 autoneg_changed = 0;
enum ice_status status;
- u64 phy_type_high;
- u64 phy_type_low;
+ u64 phy_type_high = 0;
+ u64 phy_type_low = 0;
int err = 0;
bool linkup;
@@ -2163,6 +2252,18 @@ ice_set_link_ksettings(struct net_device *netdev,
p->phy.link_info.link_info & ICE_AQ_LINK_UP)
return -EOPNOTSUPP;
+ abilities = kzalloc(sizeof(*abilities), GFP_KERNEL);
+ if (!abilities)
+ return -ENOMEM;
+
+ /* Get the PHY capabilities based on media */
+ status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_TOPO_CAP,
+ abilities, NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
/* copy the ksettings to copy_ks to avoid modifying the original */
memcpy(&copy_ks, ks, sizeof(copy_ks));
@@ -2179,8 +2280,12 @@ ice_set_link_ksettings(struct net_device *netdev,
*/
if (!bitmap_subset(copy_ks.link_modes.advertising,
safe_ks.link_modes.supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS))
- return -EINVAL;
+ __ETHTOOL_LINK_MODE_MASK_NBITS)) {
+ if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags))
+ netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
+ err = -EINVAL;
+ goto done;
+ }
/* get our own copy of the bits to check against */
memset(&safe_ks, 0, sizeof(safe_ks));
@@ -2197,33 +2302,27 @@ ice_set_link_ksettings(struct net_device *netdev,
/* If copy_ks.base and safe_ks.base are not the same now, then they are
* trying to set something that we do not support.
*/
- if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(copy_ks.base)))
- return -EOPNOTSUPP;
+ if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(copy_ks.base))) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
timeout--;
- if (!timeout)
- return -EBUSY;
+ if (!timeout) {
+ err = -EBUSY;
+ goto done;
+ }
usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX);
}
- abilities = kzalloc(sizeof(*abilities), GFP_KERNEL);
- if (!abilities)
- return -ENOMEM;
-
- /* Get the current PHY config */
- status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities,
- NULL);
- if (status) {
- err = -EAGAIN;
- goto done;
- }
+ /* Copy the current user PHY configuration. The current user PHY
+ * configuration is initialized during probe from PHY capabilities
+ * software mode, and updated on set PHY configuration.
+ */
+ memcpy(&config, &p->phy.curr_user_phy_cfg, sizeof(config));
- /* Copy abilities to config in case autoneg is not set below */
- memset(&config, 0, sizeof(config));
- config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE;
- if (abilities->caps & ICE_AQC_PHY_AN_MODE)
- config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
/* Check autoneg */
err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed,
@@ -2258,29 +2357,44 @@ ice_set_link_ksettings(struct net_device *netdev,
goto done;
}
- /* copy over the rest of the abilities */
- config.low_power_ctrl = abilities->low_power_ctrl;
- config.eee_cap = abilities->eee_cap;
- config.eeer_value = abilities->eeer_value;
- config.link_fec_opt = abilities->link_fec_options;
-
/* save the requested speeds */
p->phy.link_info.req_speeds = adv_link_speed;
/* set link and auto negotiation so changes take effect */
config.caps |= ICE_AQ_PHY_ENA_LINK;
- if (phy_type_low || phy_type_high) {
- config.phy_type_high = cpu_to_le64(phy_type_high) &
- abilities->phy_type_high;
- config.phy_type_low = cpu_to_le64(phy_type_low) &
- abilities->phy_type_low;
- } else {
+ /* check if there is a PHY type for the requested advertised speed */
+ if (!(phy_type_low || phy_type_high)) {
+ netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
err = -EAGAIN;
- netdev_info(netdev, "Nothing changed. No PHY_TYPE is corresponded to advertised link speed.\n");
goto done;
}
+ /* intersect requested advertised speed PHY types with media PHY types
+ * for set PHY configuration
+ */
+ config.phy_type_high = cpu_to_le64(phy_type_high) &
+ abilities->phy_type_high;
+ config.phy_type_low = cpu_to_le64(phy_type_low) &
+ abilities->phy_type_low;
+
+ if (!(config.phy_type_high || config.phy_type_low)) {
+ /* If there is no intersection and lenient mode is enabled, then
+ * intersect the requested advertised speed with NVM media type
+ * PHY types.
+ */
+ if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) {
+ config.phy_type_high = cpu_to_le64(phy_type_high) &
+ pf->nvm_phy_type_hi;
+ config.phy_type_low = cpu_to_le64(phy_type_low) &
+ pf->nvm_phy_type_lo;
+ } else {
+ netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
+ err = -EAGAIN;
+ goto done;
+ }
+ }
+
/* If link is up put link down */
if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) {
/* Tell the OS link is going down, the link will go
@@ -2292,12 +2406,15 @@ ice_set_link_ksettings(struct net_device *netdev,
}
/* make the aq call */
- status = ice_aq_set_phy_cfg(&pf->hw, lport, &config, NULL);
+ status = ice_aq_set_phy_cfg(&pf->hw, p, &config, NULL);
if (status) {
netdev_info(netdev, "Set phy config failed,\n");
err = -EAGAIN;
+ goto done;
}
+ /* Save speed request */
+ p->phy.curr_user_speed_req = adv_link_speed;
done:
kfree(abilities);
clear_bit(__ICE_CFG_BUSY, pf->state);
@@ -2874,8 +2991,8 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
if (status)
goto out;
- pause->autoneg = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
- AUTONEG_ENABLE : AUTONEG_DISABLE);
+ pause->autoneg = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
+ AUTONEG_DISABLE;
if (dcbx_cfg->pfc.pfcena)
/* PFC enabled so report LFC as off */
@@ -2943,8 +3060,8 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
return -EIO;
}
- is_an = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
- AUTONEG_ENABLE : AUTONEG_DISABLE);
+ is_an = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
+ AUTONEG_DISABLE;
kfree(pcaps);
@@ -3323,6 +3440,58 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return 0;
}
+/**
+ * ice_get_wol - get current Wake on LAN configuration
+ * @netdev: network interface device structure
+ * @wol: Ethtool structure to retrieve WoL settings
+ */
+static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+ if (np->vsi->type != ICE_VSI_PF)
+ netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n");
+
+ /* Get WoL settings based on the HW capability */
+ if (ice_is_wol_supported(pf)) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0;
+ } else {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ }
+}
+
+/**
+ * ice_set_wol - set Wake on LAN on supported device
+ * @netdev: network interface device structure
+ * @wol: Ethtool structure to set WoL
+ */
+static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+
+ if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(pf))
+ return -EOPNOTSUPP;
+
+ /* only magic packet is supported */
+ if (wol->wolopts && wol->wolopts != WAKE_MAGIC)
+ return -EOPNOTSUPP;
+
+ /* Set WoL only if there is a new value */
+ if (pf->wol_ena != !!wol->wolopts) {
+ pf->wol_ena = !!wol->wolopts;
+ device_set_wakeup_enable(ice_pf_to_dev(pf), pf->wol_ena);
+ netdev_dbg(netdev, "WoL magic packet %sabled\n",
+ pf->wol_ena ? "en" : "dis");
+ }
+
+ return 0;
+}
+
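These hooks back the standard ethtool WoL controls, e.g. (interface name illustrative):

	ethtool -s eth0 wol g    # enable magic-packet wake
	ethtool eth0             # 'Supports Wake-on:' / 'Wake-on:' reflect the result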
enum ice_container_type {
ICE_RX_CONTAINER,
ICE_TX_CONTAINER,
@@ -3806,6 +3975,8 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_drvinfo = ice_get_drvinfo,
.get_regs_len = ice_get_regs_len,
.get_regs = ice_get_regs,
+ .get_wol = ice_get_wol,
+ .set_wol = ice_set_wol,
.get_msglevel = ice_get_msglevel,
.set_msglevel = ice_set_msglevel,
.self_test = ice_self_test,
@@ -3848,6 +4019,8 @@ static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
.get_drvinfo = ice_get_drvinfo,
.get_regs_len = ice_get_regs_len,
.get_regs = ice_get_regs,
+ .get_wol = ice_get_wol,
+ .set_wol = ice_set_wol,
.get_msglevel = ice_get_msglevel,
.set_msglevel = ice_set_msglevel,
.get_link = ethtool_op_get_link,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 4420fc02f7e7..b17ae3e20157 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -644,7 +644,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
* This function generates a key from a value, a don't care mask and a never
* match mask.
* upd, dc, and nm are optional parameters, and can be NULL:
- * upd == NULL --> udp mask is all 1's (update all bits)
+ * upd == NULL --> upd mask is all 1's (update all bits)
* dc == NULL --> dc mask is all 0's (no don't care bits)
* nm == NULL --> nm mask is all 0's (no never match bits)
*/
@@ -1121,8 +1121,7 @@ static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
u16 size;
u32 i;
- size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) *
- (ICE_PKG_CNT - 1));
+ size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
pkg_info = kzalloc(size, GFP_KERNEL);
if (!pkg_info)
return ICE_ERR_NO_MEMORY;
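For reference, struct_size(p, member, n) from <linux/overflow.h> evaluates to sizeof(*(p)) + n * sizeof((p)->member[0]) with overflow checking (saturating to SIZE_MAX), so the conversions in this file drop the open-coded sizing; the switch from ICE_PKG_CNT - 1 to ICE_PKG_CNT presumably pairs with turning the trailing one-element array into a flexible array member, as done for tlvinfo[] in ice_dcb.h above.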
@@ -1180,7 +1179,7 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
u32 seg_count;
u32 i;
- if (len < sizeof(*pkg))
+ if (len < struct_size(pkg, seg_offset, 1))
return ICE_ERR_BUF_TOO_SHORT;
if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
@@ -1195,7 +1194,7 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
return ICE_ERR_CFG;
/* make sure segment array fits in package length */
- if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset)))
+ if (len < struct_size(pkg, seg_offset, seg_count))
return ICE_ERR_BUF_TOO_SHORT;
/* all segments must fit within length */
@@ -1300,7 +1299,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
}
/* Check if FW is compatible with the OS package */
- size = struct_size(pkg, pkg_info, ICE_PKG_CNT - 1);
+ size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
pkg = kzalloc(size, GFP_KERNEL);
if (!pkg)
return ICE_ERR_NO_MEMORY;
@@ -1764,13 +1763,13 @@ ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
goto ice_create_tunnel_err;
sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
- sizeof(*sect_rx));
+ struct_size(sect_rx, tcam, 1));
if (!sect_rx)
goto ice_create_tunnel_err;
sect_rx->count = cpu_to_le16(1);
sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
- sizeof(*sect_tx));
+ struct_size(sect_tx, tcam, 1));
if (!sect_tx)
goto ice_create_tunnel_err;
sect_tx->count = cpu_to_le16(1);
@@ -1847,7 +1846,7 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
}
/* size of section - there is at least one entry */
- size = struct_size(sect_rx, tcam, count - 1);
+ size = struct_size(sect_rx, tcam, count);
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
@@ -2922,6 +2921,8 @@ static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
ICE_FLOW_ENTRY_HNDL(e));
list_del(&p->l_entry);
+
+ mutex_destroy(&p->entries_lock);
devm_kfree(ice_hw_to_dev(hw), p);
}
mutex_unlock(&hw->fl_profs_locks[blk_idx]);
@@ -3039,7 +3040,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
memset(prof_redir->t, 0,
prof_redir->count * sizeof(*prof_redir->t));
- memset(es->t, 0, es->count * sizeof(*es->t));
+ memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
memset(es->written, 0, es->count * sizeof(*es->written));
}
@@ -3150,10 +3151,12 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->ref_count),
GFP_KERNEL);
+ if (!es->ref_count)
+ goto err;
es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->written), GFP_KERNEL);
- if (!es->ref_count)
+ if (!es->written)
goto err;
}
return 0;
@@ -3324,10 +3327,10 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
u32 id;
id = ice_sect_id(blk, ICE_VEC_TBL);
- p = (struct ice_pkg_es *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p) +
- vec_size -
- sizeof(p->es[0]));
+ p = ice_pkg_buf_alloc_section(bld, id,
+ struct_size(p, es, 1) +
+ vec_size -
+ sizeof(p->es[0]));
if (!p)
return ICE_ERR_MAX_LIMIT;
@@ -3360,8 +3363,8 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
u32 id;
id = ice_sect_id(blk, ICE_PROF_TCAM);
- p = (struct ice_prof_id_section *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+ p = ice_pkg_buf_alloc_section(bld, id,
+ struct_size(p, entry, 1));
if (!p)
return ICE_ERR_MAX_LIMIT;
@@ -3396,8 +3399,8 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
u32 id;
id = ice_sect_id(blk, ICE_XLT1);
- p = (struct ice_xlt1_section *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+ p = ice_pkg_buf_alloc_section(bld, id,
+ struct_size(p, value, 1));
if (!p)
return ICE_ERR_MAX_LIMIT;
@@ -3431,8 +3434,8 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
case ICE_VSI_MOVE:
case ICE_VSIG_REM:
id = ice_sect_id(blk, ICE_XLT2);
- p = (struct ice_xlt2_section *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+ p = ice_pkg_buf_alloc_section(bld, id,
+ struct_size(p, value, 1));
if (!p)
return ICE_ERR_MAX_LIMIT;
@@ -3875,16 +3878,16 @@ err_ice_add_prof:
}
/**
- * ice_search_prof_id_low - Search for a profile tracking ID low level
+ * ice_search_prof_id - Search for a profile tracking ID
* @hw: pointer to the HW struct
* @blk: hardware block
* @id: profile tracking ID
*
- * This will search for a profile tracking ID which was previously added. This
- * version assumes that the caller has already acquired the prof map lock.
+ * This will search for a profile tracking ID which was previously added.
+ * The profile map lock should be held before calling this function.
*/
static struct ice_prof_map *
-ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *entry = NULL;
struct ice_prof_map *map;
@@ -3899,26 +3902,6 @@ ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
}
/**
- * ice_search_prof_id - Search for a profile tracking ID
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @id: profile tracking ID
- *
- * This will search for a profile tracking ID which was previously added.
- */
-static struct ice_prof_map *
-ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
-{
- struct ice_prof_map *entry;
-
- mutex_lock(&hw->blk[blk].es.prof_map_lock);
- entry = ice_search_prof_id_low(hw, blk, id);
- mutex_unlock(&hw->blk[blk].es.prof_map_lock);
-
- return entry;
-}
-
-/**
* ice_vsig_prof_id_count - count profiles in a VSIG
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -4134,7 +4117,7 @@ enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
mutex_lock(&hw->blk[blk].es.prof_map_lock);
- pmap = ice_search_prof_id_low(hw, blk, id);
+ pmap = ice_search_prof_id(hw, blk, id);
if (!pmap) {
status = ICE_ERR_DOES_NOT_EXIST;
goto err_ice_rem_prof;
@@ -4167,22 +4150,28 @@ static enum ice_status
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
struct list_head *chg)
{
+ enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_chs_chg *p;
u16 i;
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_get_prof;
+ }
for (i = 0; i < map->ptg_cnt; i++)
if (!hw->blk[blk].es.written[map->prof_id]) {
/* add ES to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
GFP_KERNEL);
- if (!p)
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
goto err_ice_get_prof;
+ }
p->type = ICE_PTG_ES_ADD;
p->ptype = 0;
@@ -4197,11 +4186,10 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
list_add(&p->list_entry, chg);
}
- return 0;
-
err_ice_get_prof:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
/* let caller clean up the change list */
- return ICE_ERR_NO_MEMORY;
+ return status;
}
/**
@@ -4255,17 +4243,23 @@ static enum ice_status
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
struct list_head *lst, u64 hdl)
{
+ enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_vsig_prof *p;
u16 i;
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_add_prof_to_lst;
+ }
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
- if (!p)
- return ICE_ERR_NO_MEMORY;
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_ice_add_prof_to_lst;
+ }
p->profile_cookie = map->profile_cookie;
p->prof_id = map->prof_id;
@@ -4279,7 +4273,9 @@ ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
list_add(&p->list, lst);
- return 0;
+err_ice_add_prof_to_lst:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+ return status;
}
/**
@@ -4497,16 +4493,12 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+ enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_vsig_prof *t;
struct ice_chs_chg *p;
u16 vsig_idx, i;
- /* Get the details on the profile specified by the handle ID */
- map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
-
/* Error, if this VSIG already has this profile */
if (ice_has_prof_vsig(hw, blk, vsig, hdl))
return ICE_ERR_ALREADY_EXISTS;
@@ -4516,19 +4508,28 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
if (!t)
return ICE_ERR_NO_MEMORY;
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
+ /* Get the details on the profile specified by the handle ID */
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_add_prof_id_vsig;
+ }
+
t->profile_cookie = map->profile_cookie;
t->prof_id = map->prof_id;
t->tcam_count = map->ptg_cnt;
/* create TCAM entries */
for (i = 0; i < map->ptg_cnt; i++) {
- enum ice_status status;
u16 tcam_idx;
/* add TCAM to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
- if (!p)
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
goto err_ice_add_prof_id_vsig;
+ }
/* allocate the TCAM entry index */
status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
@@ -4572,12 +4573,14 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
list_add(&t->list,
&hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
- return 0;
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+ return status;
err_ice_add_prof_id_vsig:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
/* let caller clean up the change list */
devm_kfree(ice_hw_to_dev(hw), t);
- return ICE_ERR_NO_MEMORY;
+ return status;
}
/**
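Note on the locking refactor above: the prof_map_lock is now taken by the callers of ice_search_prof_id() (ice_get_prof(), ice_add_prof_to_lst(), ice_add_prof_id_vsig(), ice_rem_prof()) around both the lookup and any use of the returned entry, instead of inside the now-removed wrapper. A minimal, illustrative sketch of the caller pattern; use_profile() is a hypothetical consumer, not driver code:

	mutex_lock(&hw->blk[blk].es.prof_map_lock);
	map = ice_search_prof_id(hw, blk, hdl);
	if (map)
		use_profile(hw, map);	/* hypothetical: anything dereferencing the entry */
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);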
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index a6f391eac8ff..c1c99a267a98 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -22,7 +22,7 @@ struct ice_fv {
struct ice_pkg_hdr {
struct ice_pkg_ver pkg_format_ver;
__le32 seg_count;
- __le32 seg_offset[1];
+ __le32 seg_offset[];
};
/* generic segment */
@@ -53,12 +53,12 @@ struct ice_device_id_entry {
struct ice_seg {
struct ice_generic_seg_hdr hdr;
__le32 device_table_count;
- struct ice_device_id_entry device_table[1];
+ struct ice_device_id_entry device_table[];
};
struct ice_nvm_table {
__le32 table_count;
- __le32 vers[1];
+ __le32 vers[];
};
struct ice_buf {
@@ -68,7 +68,7 @@ struct ice_buf {
struct ice_buf_table {
__le32 buf_count;
- struct ice_buf buf_array[1];
+ struct ice_buf buf_array[];
};
/* global metadata specific segment */
@@ -101,11 +101,12 @@ struct ice_section_entry {
struct ice_buf_hdr {
__le16 section_count;
__le16 data_end;
- struct ice_section_entry section_entry[1];
+ struct ice_section_entry section_entry[];
};
#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
- sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
+ struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
+ (ent_sz))
/* ice package section IDs */
#define ICE_SID_XLT0_SW 10
@@ -198,17 +199,17 @@ struct ice_label {
struct ice_label_section {
__le16 count;
- struct ice_label label[1];
+ struct ice_label label[];
};
#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
- sizeof(struct ice_label_section) - sizeof(struct ice_label), \
- sizeof(struct ice_label))
+ struct_size((struct ice_label_section *)0, label, 1) - \
+ sizeof(struct ice_label), sizeof(struct ice_label))
struct ice_sw_fv_section {
__le16 count;
__le16 base_offset;
- struct ice_fv fv[1];
+ struct ice_fv fv[];
};
/* The BOOST TCAM stores the match packet header in reverse order, meaning
@@ -245,30 +246,30 @@ struct ice_boost_tcam_entry {
struct ice_boost_tcam_section {
__le16 count;
__le16 reserved;
- struct ice_boost_tcam_entry tcam[1];
+ struct ice_boost_tcam_entry tcam[];
};
#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
- sizeof(struct ice_boost_tcam_section) - \
+ struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
struct ice_xlt1_section {
__le16 count;
__le16 offset;
- u8 value[1];
-} __packed;
+ u8 value[];
+};
struct ice_xlt2_section {
__le16 count;
__le16 offset;
- __le16 value[1];
+ __le16 value[];
};
struct ice_prof_redir_section {
__le16 count;
__le16 offset;
- u8 redir_value[1];
+ u8 redir_value[];
};
/* package buffer building */
@@ -327,7 +328,7 @@ struct ice_tunnel_table {
struct ice_pkg_es {
__le16 count;
__le16 offset;
- struct ice_fv_word es[1];
+ struct ice_fv_word es[];
};
struct ice_es {
@@ -461,8 +462,8 @@ struct ice_prof_tcam_entry {
struct ice_prof_id_section {
__le16 count;
- struct ice_prof_tcam_entry entry[1];
-} __packed;
+ struct ice_prof_tcam_entry entry[];
+};
struct ice_prof_tcam {
u32 sid;
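Background for the conversions above: the one-element arrays are replaced with C99 flexible array members, and the allocations and size macros switch to struct_size(), which computes the size of a header plus N trailing elements (with overflow checking in the kernel helper). A standalone sketch using hypothetical names, not driver code:

	#include <stddef.h>

	struct demo_section {
		unsigned short count;
		unsigned short offset;
		unsigned char value[];	/* flexible array member, was value[1] */
	};

	/* struct_size(p, value, n) is effectively: */
	static size_t demo_section_size(size_t n)
	{
		return offsetof(struct demo_section, value) + n * sizeof(unsigned char);
	}

This is also why ICE_MAX_ENTRIES_IN_BUF() and the related macros now use struct_size((struct ... *)0, member, 1) where they previously used sizeof(struct ...): with the [1] arrays gone, plain sizeof() no longer accounts for one trailing element.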
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index d74e5290677f..fe677621dd51 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -1187,7 +1187,7 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
if (list_empty(&hw->fl_profs[blk]))
return 0;
- mutex_lock(&hw->fl_profs_locks[blk]);
+ mutex_lock(&hw->rss_locks);
list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
if (test_bit(vsi_handle, p->vsis)) {
status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
@@ -1195,12 +1195,12 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
break;
if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
- status = ice_flow_rem_prof_sync(hw, blk, p);
+ status = ice_flow_rem_prof(hw, blk, p->id);
if (status)
break;
}
}
- mutex_unlock(&hw->fl_profs_locks[blk]);
+ mutex_unlock(&hw->rss_locks);
return status;
}
@@ -1597,7 +1597,8 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
*/
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
{
- struct ice_rss_cfg *r, *rss_cfg = NULL;
+ u64 rss_hash = ICE_HASH_INVALID;
+ struct ice_rss_cfg *r;
/* verify if the protocol header is non zero and VSI is valid */
if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
@@ -1607,10 +1608,10 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
list_for_each_entry(r, &hw->rss_list_head, l_entry)
if (test_bit(vsi_handle, r->vsis) &&
r->packet_hdr == hdrs) {
- rss_cfg = r;
+ rss_hash = r->hashed_flds;
break;
}
mutex_unlock(&hw->rss_locks);
- return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
+ return rss_hash;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
new file mode 100644
index 000000000000..deaefe00c9c0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -0,0 +1,773 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2019, Intel Corporation. */
+
+#include <asm/unaligned.h>
+#include <linux/uuid.h>
+#include <linux/crc32.h>
+#include <linux/pldmfw.h>
+#include "ice.h"
+#include "ice_fw_update.h"
+
+struct ice_fwu_priv {
+ struct pldmfw context;
+
+ struct ice_pf *pf;
+ struct netlink_ext_ack *extack;
+
+ /* Track which NVM banks to activate at the end of the update */
+ u8 activate_flags;
+};
+
+/**
+ * ice_send_package_data - Send record package data to firmware
+ * @context: PLDM fw update structure
+ * @data: pointer to the package data
+ * @length: length of the package data
+ *
+ * Send a copy of the package data associated with the PLDM record matching
+ * this device to the firmware.
+ *
+ * Note that this function sends an AdminQ command that will fail unless the
+ * NVM resource has been acquired.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
+{
+ struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct device *dev = context->dev;
+ struct ice_pf *pf = priv->pf;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u8 *package_data;
+
+ package_data = kmemdup(data, length, GFP_KERNEL);
+ if (!package_data)
+ return -ENOMEM;
+
+ status = ice_nvm_set_pkg_data(hw, false, package_data, length, NULL);
+
+ kfree(package_data);
+
+ if (status) {
+ dev_err(dev, "Failed to send record package data to firmware, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_check_component_response - Report firmware response to a component
+ * @pf: device private data structure
+ * @id: component id being checked
+ * @response: indicates whether this component can be updated
+ * @code: code indicating reason for response
+ * @extack: netlink extended ACK structure
+ *
+ * Check whether firmware indicates that this component can be updated. Report
+ * a suitable error message over the netlink extended ACK if the component
+ * cannot be updated.
+ *
+ * Returns: zero if the component can be updated, or -ECANCELED if the
+ * firmware indicates the component cannot be updated.
+ */
+static int
+ice_check_component_response(struct ice_pf *pf, u16 id, u8 response, u8 code,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ const char *component;
+
+ switch (id) {
+ case NVM_COMP_ID_OROM:
+ component = "fw.undi";
+ break;
+ case NVM_COMP_ID_NVM:
+ component = "fw.mgmt";
+ break;
+ case NVM_COMP_ID_NETLIST:
+ component = "fw.netlist";
+ break;
+ default:
+ WARN(1, "Unexpected unknown component identifier 0x%02x", id);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "%s: firmware response 0x%x, code 0x%x\n",
+ component, response, code);
+
+ switch (response) {
+ case ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED:
+ /* firmware indicated this update is good to proceed */
+ return 0;
+ case ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE:
+ dev_warn(dev, "firmware recommends not updating %s, as it may result in a downgrade. continuing anyway\n", component);
+ return 0;
+ case ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED:
+ dev_info(dev, "firmware has rejected updating %s\n", component);
+ break;
+ }
+
+ switch (code) {
+ case ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE:
+ dev_err(dev, "Component comparison stamp for %s is identical to the running image\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component comparison stamp is identical to running image");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_STAMP_LOWER:
+ dev_err(dev, "Component comparison stamp for %s is lower than the running image\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component comparison stamp is lower than running image");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_INVALID_STAMP_CODE:
+ dev_err(dev, "Component comparison stamp for %s is invalid\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component comparison stamp is invalid");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_CONFLICT_CODE:
+ dev_err(dev, "%s conflicts with a previous component table\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component table conflict occurred");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE:
+ dev_err(dev, "Pre-requisites for component %s have not been met\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component pre-requisites not met");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_NOT_SUPPORTED_CODE:
+ dev_err(dev, "%s is not a supported component\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component not supported");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE:
+ dev_err(dev, "Security restrictions prevent %s from being downgraded\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component cannot be downgraded");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE:
+ dev_err(dev, "Received an incomplete component image for %s\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Incomplete component image");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE:
+ dev_err(dev, "Component version for %s is identical to the running image\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component version is identical to running image");
+ break;
+ case ICE_AQ_NVM_PASS_COMP_VER_STR_LOWER_CODE:
+ dev_err(dev, "Component version for %s is lower than the running image\n",
+ component);
+ NL_SET_ERR_MSG_MOD(extack, "Component version is lower than the running image");
+ break;
+ default:
+ dev_err(dev, "Unexpected response code 0x02%x for %s\n",
+ code, component);
+ NL_SET_ERR_MSG_MOD(extack, "Received unexpected response code from firmware");
+ break;
+ }
+
+ return -ECANCELED;
+}
+
+/**
+ * ice_send_component_table - Send PLDM component table to firmware
+ * @context: PLDM fw update structure
+ * @component: the component to process
+ * @transfer_flag: relative transfer order of this component
+ *
+ * Read relevant data from the component and forward it to the device
+ * firmware. Check the response to determine if the firmware indicates that
+ * the update can proceed.
+ *
+ * This function sends AdminQ commands related to the NVM, and assumes that
+ * the NVM resource has been acquired.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_send_component_table(struct pldmfw *context, struct pldmfw_component *component,
+ u8 transfer_flag)
+{
+ struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ice_aqc_nvm_comp_tbl *comp_tbl;
+ u8 comp_response, comp_response_code;
+ struct device *dev = context->dev;
+ struct ice_pf *pf = priv->pf;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ size_t length;
+
+ switch (component->identifier) {
+ case NVM_COMP_ID_OROM:
+ case NVM_COMP_ID_NVM:
+ case NVM_COMP_ID_NETLIST:
+ break;
+ default:
+ dev_err(dev, "Unable to update due to a firmware component with unknown ID %u\n",
+ component->identifier);
+ NL_SET_ERR_MSG_MOD(extack, "Unable to update due to unknown firmware component");
+ return -EOPNOTSUPP;
+ }
+
+ length = struct_size(comp_tbl, cvs, component->version_len);
+ comp_tbl = kzalloc(length, GFP_KERNEL);
+ if (!comp_tbl)
+ return -ENOMEM;
+
+ comp_tbl->comp_class = cpu_to_le16(component->classification);
+ comp_tbl->comp_id = cpu_to_le16(component->identifier);
+ comp_tbl->comp_class_idx = FWU_COMP_CLASS_IDX_NOT_USE;
+ comp_tbl->comp_cmp_stamp = cpu_to_le32(component->comparison_stamp);
+ comp_tbl->cvs_type = component->version_type;
+ comp_tbl->cvs_len = component->version_len;
+ memcpy(comp_tbl->cvs, component->version_string, component->version_len);
+
+ status = ice_nvm_pass_component_tbl(hw, (u8 *)comp_tbl, length,
+ transfer_flag, &comp_response,
+ &comp_response_code, NULL);
+
+ kfree(comp_tbl);
+
+ if (status) {
+ dev_err(dev, "Failed to transfer component table to firmware, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware");
+ return -EIO;
+ }
+
+ return ice_check_component_response(pf, component->identifier, comp_response,
+ comp_response_code, extack);
+}
+
+/**
+ * ice_write_one_nvm_block - Write an NVM block and await completion response
+ * @pf: the PF data structure
+ * @module: the module to write to
+ * @offset: offset in bytes
+ * @block_size: size of the block to write, up to 4k
+ * @block: pointer to block of data to write
+ * @last_cmd: whether this is the last command
+ * @extack: netlink extended ACK structure
+ *
+ * Write a block of data to a flash module, and wait for the completion
+ * response message from firmware.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
+ u16 block_size, u8 *block, bool last_cmd,
+ struct netlink_ext_ack *extack)
+{
+ u16 completion_module, completion_retval;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_rq_event_info event;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u32 completion_offset;
+ int err;
+
+ memset(&event, 0, sizeof(event));
+
+ status = ice_aq_update_nvm(hw, module, offset, block_size, block,
+ last_cmd, 0, NULL);
+ if (status) {
+ dev_err(dev, "Failed to program flash module 0x%02x at offset %u, err %s aq_err %s\n",
+ module, offset, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module");
+ return -EIO;
+ }
+
+ err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, HZ, &event);
+ if (err) {
+ dev_err(dev, "Timed out waiting for firmware write completion for module 0x%02x, err %d\n",
+ module, err);
+ NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware");
+ return -EIO;
+ }
+
+ completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid);
+ completion_retval = le16_to_cpu(event.desc.retval);
+
+ completion_offset = le16_to_cpu(event.desc.params.nvm.offset_low);
+ completion_offset |= event.desc.params.nvm.offset_high << 16;
+
+ if (completion_module != module) {
+ dev_err(dev, "Unexpected module_typeid in write completion: got 0x%x, expected 0x%x\n",
+ completion_module, module);
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected firmware response");
+ return -EIO;
+ }
+
+ if (completion_offset != offset) {
+ dev_err(dev, "Unexpected offset in write completion: got %u, expected %u\n",
+ completion_offset, offset);
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected firmware response");
+ return -EIO;
+ }
+
+ if (completion_retval) {
+ dev_err(dev, "Firmware failed to program flash module 0x%02x at offset %u, completion err %s\n",
+ module, offset,
+ ice_aq_str((enum ice_aq_err)completion_retval));
+ NL_SET_ERR_MSG_MOD(extack, "Firmware failed to program flash module");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_write_nvm_module - Write data to an NVM module
+ * @pf: the PF driver structure
+ * @module: the module id to program
+ * @component: the name of the component being updated
+ * @image: buffer of image data to write to the NVM
+ * @length: length of the buffer
+ * @extack: netlink extended ACK structure
+ *
+ * Loop over the data for a given NVM module and program it in 4 KB
+ * blocks. Notify the devlink core of progress after each block is programmed.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
+ const u8 *image, u32 length,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink;
+ u32 offset = 0;
+ bool last_cmd;
+ u8 *block;
+ int err;
+
+ devlink = priv_to_devlink(pf);
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component, 0, length);
+
+ block = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!block)
+ return -ENOMEM;
+
+ do {
+ u32 block_size;
+
+ block_size = min_t(u32, ICE_AQ_MAX_BUF_LEN, length - offset);
+ last_cmd = !(offset + block_size < length);
+
+ /* ice_aq_update_nvm may copy the firmware response into the
+ * buffer, so we must make a copy since the source data is
+ * constant.
+ */
+ memcpy(block, image + offset, block_size);
+
+ err = ice_write_one_nvm_block(pf, module, offset, block_size,
+ block, last_cmd, extack);
+ if (err)
+ break;
+
+ offset += block_size;
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component, offset, length);
+ } while (!last_cmd);
+
+ if (err)
+ devlink_flash_update_status_notify(devlink, "Flashing failed",
+ component, length, length);
+ else
+ devlink_flash_update_status_notify(devlink, "Flashing done",
+ component, length, length);
+
+ kfree(block);
+ return err;
+}
+
+/**
+ * ice_erase_nvm_module - Erase an NVM module and await firmware completion
+ * @pf: the PF data structure
+ * @module: the module to erase
+ * @component: name of the component being updated
+ * @extack: netlink extended ACK structure
+ *
+ * Erase the inactive NVM bank associated with this module, and wait for
+ * a completion response message from firmware.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
+ struct netlink_ext_ack *extack)
+{
+ u16 completion_module, completion_retval;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_rq_event_info event;
+ struct ice_hw *hw = &pf->hw;
+ struct devlink *devlink;
+ enum ice_status status;
+ int err;
+
+ memset(&event, 0, sizeof(event));
+
+ devlink = priv_to_devlink(pf);
+
+ devlink_flash_update_status_notify(devlink, "Erasing", component, 0, 0);
+
+ status = ice_aq_erase_nvm(hw, module, NULL);
+ if (status) {
+ dev_err(dev, "Failed to erase %s (module 0x%02x), err %s aq_err %s\n",
+ component, module, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module");
+ err = -EIO;
+ goto out_notify_devlink;
+ }
+
+ /* Yes, this really can take minutes to complete */
+ err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, 300 * HZ, &event);
+ if (err) {
+ dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n",
+ component, module, err);
+ NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware");
+ goto out_notify_devlink;
+ }
+
+ completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid);
+ completion_retval = le16_to_cpu(event.desc.retval);
+
+ if (completion_module != module) {
+ dev_err(dev, "Unexpected module_typeid in erase completion for %s: got 0x%x, expected 0x%x\n",
+ component, completion_module, module);
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected firmware response");
+ err = -EIO;
+ goto out_notify_devlink;
+ }
+
+ if (completion_retval) {
+ dev_err(dev, "Firmware failed to erase %s (module 0x02%x), aq_err %s\n",
+ component, module,
+ ice_aq_str((enum ice_aq_err)completion_retval));
+ NL_SET_ERR_MSG_MOD(extack, "Firmware failed to erase flash");
+ err = -EIO;
+ goto out_notify_devlink;
+ }
+
+out_notify_devlink:
+ if (err)
+ devlink_flash_update_status_notify(devlink, "Erasing failed",
+ component, 0, 0);
+ else
+ devlink_flash_update_status_notify(devlink, "Erasing done",
+ component, 0, 0);
+
+ return err;
+}
+
+/**
+ * ice_switch_flash_banks - Tell firmware to switch NVM banks
+ * @pf: Pointer to the PF data structure
+ * @activate_flags: flags used for the activation command
+ * @extack: netlink extended ACK structure
+ *
+ * Notify firmware to activate the newly written flash banks, and wait for the
+ * firmware response.
+ *
+ * Returns: zero on success or an error code on failure.
+ */
+static int ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_rq_event_info event;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u16 completion_retval;
+ int err;
+
+ memset(&event, 0, sizeof(event));
+
+ status = ice_nvm_write_activate(hw, activate_flags);
+ if (status) {
+ dev_err(dev, "Failed to switch active flash banks, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks");
+ return -EIO;
+ }
+
+ err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, HZ,
+ &event);
+ if (err) {
+ dev_err(dev, "Timed out waiting for firmware to switch active flash banks, err %d\n",
+ err);
+ NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware");
+ return err;
+ }
+
+ completion_retval = le16_to_cpu(event.desc.retval);
+ if (completion_retval) {
+ dev_err(dev, "Firmware failed to switch active flash banks aq_err %s\n",
+ ice_aq_str((enum ice_aq_err)completion_retval));
+ NL_SET_ERR_MSG_MOD(extack, "Firmware failed to switch active flash banks");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_flash_component - Flash a component of the NVM
+ * @context: PLDM fw update structure
+ * @component: the component table to program
+ *
+ * Program the flash contents for a given component. First, determine the
+ * module id. Then, erase the secondary bank for this module. Finally, write
+ * the contents of the component to the NVM.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_flash_component(struct pldmfw *context, struct pldmfw_component *component)
+{
+ struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ice_pf *pf = priv->pf;
+ const char *name;
+ u16 module;
+ u8 flag;
+ int err;
+
+ switch (component->identifier) {
+ case NVM_COMP_ID_OROM:
+ module = ICE_SR_1ST_OROM_BANK_PTR;
+ flag = ICE_AQC_NVM_ACTIV_SEL_OROM;
+ name = "fw.undi";
+ break;
+ case NVM_COMP_ID_NVM:
+ module = ICE_SR_1ST_NVM_BANK_PTR;
+ flag = ICE_AQC_NVM_ACTIV_SEL_NVM;
+ name = "fw.mgmt";
+ break;
+ case NVM_COMP_ID_NETLIST:
+ module = ICE_SR_NETLIST_BANK_PTR;
+ flag = ICE_AQC_NVM_ACTIV_SEL_NETLIST;
+ name = "fw.netlist";
+ break;
+ default:
+ /* This should not trigger, since we check the id before
+ * sending the component table to firmware.
+ */
+ WARN(1, "Unexpected unknown component identifier 0x%02x",
+ component->identifier);
+ return -EINVAL;
+ }
+
+ /* Mark this component for activating at the end */
+ priv->activate_flags |= flag;
+
+ err = ice_erase_nvm_module(pf, module, name, extack);
+ if (err)
+ return err;
+
+ return ice_write_nvm_module(pf, module, name, component->component_data,
+ component->component_size, extack);
+}
+
+/**
+ * ice_finalize_update - Perform last steps to complete device update
+ * @context: PLDM fw update structure
+ *
+ * Called as the last step of the update process. Complete the update by
+ * telling the firmware to switch active banks, and perform a reset if
+ * configured.
+ *
+ * Returns: 0 on success, or an error code on failure.
+ */
+static int ice_finalize_update(struct pldmfw *context)
+{
+ struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ice_pf *pf = priv->pf;
+ int err;
+
+ /* Finally, notify firmware to activate the written NVM banks */
+ err = ice_switch_flash_banks(pf, priv->activate_flags, extack);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const struct pldmfw_ops ice_fwu_ops = {
+ .match_record = &pldmfw_op_pci_match_record,
+ .send_package_data = &ice_send_package_data,
+ .send_component_table = &ice_send_component_table,
+ .flash_component = &ice_flash_component,
+ .finalize_update = &ice_finalize_update,
+};
+
+/**
+ * ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device
+ * @pf: private device driver structure
+ * @fw: firmware object pointing to the relevant firmware file
+ * @extack: netlink extended ACK structure
+ *
+ * Parse the data for a given firmware file, verifying that it is a valid PLDM
+ * formatted image that matches this device.
+ *
+ * Extract the device record Package Data and Component Tables and send them
+ * to the firmware. Extract and write the flash data for each of the three
+ * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify
+ * firmware once the data is written to the inactive banks.
+ *
+ * Returns: zero on success or a negative error code on failure.
+ */
+int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_fwu_priv priv;
+ enum ice_status status;
+ int err;
+
+ memset(&priv, 0, sizeof(priv));
+
+ priv.context.ops = &ice_fwu_ops;
+ priv.context.dev = dev;
+ priv.extack = extack;
+ priv.pf = pf;
+ priv.activate_flags = ICE_AQC_NVM_PRESERVE_ALL;
+
+ status = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (status) {
+ dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
+ return -EIO;
+ }
+
+ err = pldmfw_flash_image(&priv.context, fw);
+
+ ice_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ice_check_for_pending_update - Check for a pending flash update
+ * @pf: the PF driver structure
+ * @component: if not NULL, the name of the component being updated
+ * @extack: Netlink extended ACK structure
+ *
+ * Check whether the device already has a pending flash update. If such an
+ * update is found, cancel it so that the requested update may proceed.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw_dev_caps *dev_caps;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u8 pending = 0;
+ int err;
+
+ dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL);
+ if (!dev_caps)
+ return -ENOMEM;
+
+ /* Read the most recent device capabilities from firmware. Do not use
+ * the cached values in hw->dev_caps, because the pending update flag
+ * may have changed, e.g. if an update was previously completed and
+ * the system has not yet rebooted.
+ */
+ status = ice_discover_dev_caps(hw, dev_caps);
+ if (status) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to read device capabilities");
+ kfree(dev_caps);
+ return -EIO;
+ }
+
+ if (dev_caps->common_cap.nvm_update_pending_nvm) {
+ dev_info(dev, "The fw.mgmt flash component has a pending update\n");
+ pending |= ICE_AQC_NVM_ACTIV_SEL_NVM;
+ }
+
+ if (dev_caps->common_cap.nvm_update_pending_orom) {
+ dev_info(dev, "The fw.undi flash component has a pending update\n");
+ pending |= ICE_AQC_NVM_ACTIV_SEL_OROM;
+ }
+
+ if (dev_caps->common_cap.nvm_update_pending_netlist) {
+ dev_info(dev, "The fw.netlist flash component has a pending update\n");
+ pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST;
+ }
+
+ kfree(dev_caps);
+
+ /* If the flash_update request is for a specific component, ignore all
+ * of the other components.
+ */
+ if (component) {
+ if (strcmp(component, "fw.mgmt") == 0)
+ pending &= ICE_AQC_NVM_ACTIV_SEL_NVM;
+ else if (strcmp(component, "fw.undi") == 0)
+ pending &= ICE_AQC_NVM_ACTIV_SEL_OROM;
+ else if (strcmp(component, "fw.netlist") == 0)
+ pending &= ICE_AQC_NVM_ACTIV_SEL_NETLIST;
+ else
+ WARN(1, "Unexpected flash component %s", component);
+ }
+
+ /* There is no previous pending update, so this request may continue */
+ if (!pending)
+ return 0;
+
+ /* In order to allow overwriting a previous pending update, notify
+ * firmware to cancel that update by issuing the appropriate command.
+ */
+ devlink_flash_update_status_notify(devlink,
+ "Canceling previous pending update",
+ component, 0, 0);
+
+ status = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (status) {
+ dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
+ return -EIO;
+ }
+
+ pending |= ICE_AQC_NVM_REVERT_LAST_ACTIV;
+ err = ice_switch_flash_banks(pf, pending, extack);
+
+ ice_release_nvm(hw);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h
new file mode 100644
index 000000000000..79472cc618b4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2019, Intel Corporation. */
+
+#ifndef _ICE_FW_UPDATE_H_
+#define _ICE_FW_UPDATE_H_
+
+int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
+ struct netlink_ext_ack *extack);
+int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
+ struct netlink_ext_ack *extack);
+
+#endif
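A minimal sketch of how a devlink flash handler might chain the two helpers declared above; only ice_check_for_pending_update() and ice_flash_pldm_image() come from this patch, the surrounding function is illustrative:

	static int example_flash_update(struct ice_pf *pf, const struct firmware *fw,
					struct netlink_ext_ack *extack)
	{
		int err;

		/* Cancel any update already staged in the inactive flash banks */
		err = ice_check_for_pending_update(pf, NULL, extack);
		if (err)
			return err;

		/* Parse the PLDM image and program each matching component */
		return ice_flash_pldm_image(pf, fw, extack);
	}

From user space this path is exercised through devlink, for example devlink dev flash pci/0000:af:00.0 file ice_fw.bin (PCI address and file name are placeholders).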
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 1086c9f778b4..90abc8612a6a 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -57,7 +57,7 @@
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
-#define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */
+#define PRTDCB_TUP2TC 0x001D26C0
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
@@ -362,13 +362,22 @@
#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
+#define PRTRPB_RDPC 0x000AC260
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
#define VSIQF_FD_CNT_FD_GCNT_S 0
#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
+#define PFPM_APM 0x000B8080
+#define PFPM_APM_APME_M BIT(0)
+#define PFPM_WUFC 0x0009DC00
+#define PFPM_WUFC_MAG_M BIT(1)
+#define PFPM_WUS 0x0009DB80
+#define PFPM_WUS_LNKC_M BIT(0)
+#define PFPM_WUS_MAG_M BIT(1)
+#define PFPM_WUS_MNG_M BIT(3)
+#define PFPM_WUS_FW_RST_WK_M BIT(31)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
-#define PRTRPB_RDPC 0x000AC260
#endif /* _ICE_HW_AUTOGEN_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 14dfbbc1b2cf..4ec24c3e813f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -601,6 +601,7 @@ struct ice_tlan_ctx {
/* shorter macros makes the table fit but are terse */
#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG
+#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG
/* Lookup table mapping the HW PTYPE to the bit field for decoding */
static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
@@ -608,6 +609,319 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
ICE_PTT_UNUSED_ENTRY(0),
ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(3),
+ ICE_PTT_UNUSED_ENTRY(4),
+ ICE_PTT_UNUSED_ENTRY(5),
+ ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(8),
+ ICE_PTT_UNUSED_ENTRY(9),
+ ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(12),
+ ICE_PTT_UNUSED_ENTRY(13),
+ ICE_PTT_UNUSED_ENTRY(14),
+ ICE_PTT_UNUSED_ENTRY(15),
+ ICE_PTT_UNUSED_ENTRY(16),
+ ICE_PTT_UNUSED_ENTRY(17),
+ ICE_PTT_UNUSED_ENTRY(18),
+ ICE_PTT_UNUSED_ENTRY(19),
+ ICE_PTT_UNUSED_ENTRY(20),
+ ICE_PTT_UNUSED_ENTRY(21),
+
+ /* Non Tunneled IPv4 */
+ ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(25),
+ ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(32),
+ ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(39),
+ ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(47),
+ ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(54),
+ ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(62),
+ ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(69),
+ ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(77),
+ ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(84),
+ ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ ICE_PTT_UNUSED_ENTRY(91),
+ ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(98),
+ ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(105),
+ ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(113),
+ ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(120),
+ ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(128),
+ ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(135),
+ ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(143),
+ ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(150),
+ ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ ICE_PTT_UNUSED_ENTRY(154),
+ ICE_PTT_UNUSED_ENTRY(155),
+ ICE_PTT_UNUSED_ENTRY(156),
+ ICE_PTT_UNUSED_ENTRY(157),
+ ICE_PTT_UNUSED_ENTRY(158),
+ ICE_PTT_UNUSED_ENTRY(159),
+
+ ICE_PTT_UNUSED_ENTRY(160),
+ ICE_PTT_UNUSED_ENTRY(161),
+ ICE_PTT_UNUSED_ENTRY(162),
+ ICE_PTT_UNUSED_ENTRY(163),
+ ICE_PTT_UNUSED_ENTRY(164),
+ ICE_PTT_UNUSED_ENTRY(165),
+ ICE_PTT_UNUSED_ENTRY(166),
+ ICE_PTT_UNUSED_ENTRY(167),
+ ICE_PTT_UNUSED_ENTRY(168),
+ ICE_PTT_UNUSED_ENTRY(169),
+
+ ICE_PTT_UNUSED_ENTRY(170),
+ ICE_PTT_UNUSED_ENTRY(171),
+ ICE_PTT_UNUSED_ENTRY(172),
+ ICE_PTT_UNUSED_ENTRY(173),
+ ICE_PTT_UNUSED_ENTRY(174),
+ ICE_PTT_UNUSED_ENTRY(175),
+ ICE_PTT_UNUSED_ENTRY(176),
+ ICE_PTT_UNUSED_ENTRY(177),
+ ICE_PTT_UNUSED_ENTRY(178),
+ ICE_PTT_UNUSED_ENTRY(179),
+
+ ICE_PTT_UNUSED_ENTRY(180),
+ ICE_PTT_UNUSED_ENTRY(181),
+ ICE_PTT_UNUSED_ENTRY(182),
+ ICE_PTT_UNUSED_ENTRY(183),
+ ICE_PTT_UNUSED_ENTRY(184),
+ ICE_PTT_UNUSED_ENTRY(185),
+ ICE_PTT_UNUSED_ENTRY(186),
+ ICE_PTT_UNUSED_ENTRY(187),
+ ICE_PTT_UNUSED_ENTRY(188),
+ ICE_PTT_UNUSED_ENTRY(189),
+
+ ICE_PTT_UNUSED_ENTRY(190),
+ ICE_PTT_UNUSED_ENTRY(191),
+ ICE_PTT_UNUSED_ENTRY(192),
+ ICE_PTT_UNUSED_ENTRY(193),
+ ICE_PTT_UNUSED_ENTRY(194),
+ ICE_PTT_UNUSED_ENTRY(195),
+ ICE_PTT_UNUSED_ENTRY(196),
+ ICE_PTT_UNUSED_ENTRY(197),
+ ICE_PTT_UNUSED_ENTRY(198),
+ ICE_PTT_UNUSED_ENTRY(199),
+
+ ICE_PTT_UNUSED_ENTRY(200),
+ ICE_PTT_UNUSED_ENTRY(201),
+ ICE_PTT_UNUSED_ENTRY(202),
+ ICE_PTT_UNUSED_ENTRY(203),
+ ICE_PTT_UNUSED_ENTRY(204),
+ ICE_PTT_UNUSED_ENTRY(205),
+ ICE_PTT_UNUSED_ENTRY(206),
+ ICE_PTT_UNUSED_ENTRY(207),
+ ICE_PTT_UNUSED_ENTRY(208),
+ ICE_PTT_UNUSED_ENTRY(209),
+
+ ICE_PTT_UNUSED_ENTRY(210),
+ ICE_PTT_UNUSED_ENTRY(211),
+ ICE_PTT_UNUSED_ENTRY(212),
+ ICE_PTT_UNUSED_ENTRY(213),
+ ICE_PTT_UNUSED_ENTRY(214),
+ ICE_PTT_UNUSED_ENTRY(215),
+ ICE_PTT_UNUSED_ENTRY(216),
+ ICE_PTT_UNUSED_ENTRY(217),
+ ICE_PTT_UNUSED_ENTRY(218),
+ ICE_PTT_UNUSED_ENTRY(219),
+
+ ICE_PTT_UNUSED_ENTRY(220),
+ ICE_PTT_UNUSED_ENTRY(221),
+ ICE_PTT_UNUSED_ENTRY(222),
+ ICE_PTT_UNUSED_ENTRY(223),
+ ICE_PTT_UNUSED_ENTRY(224),
+ ICE_PTT_UNUSED_ENTRY(225),
+ ICE_PTT_UNUSED_ENTRY(226),
+ ICE_PTT_UNUSED_ENTRY(227),
+ ICE_PTT_UNUSED_ENTRY(228),
+ ICE_PTT_UNUSED_ENTRY(229),
+
+ ICE_PTT_UNUSED_ENTRY(230),
+ ICE_PTT_UNUSED_ENTRY(231),
+ ICE_PTT_UNUSED_ENTRY(232),
+ ICE_PTT_UNUSED_ENTRY(233),
+ ICE_PTT_UNUSED_ENTRY(234),
+ ICE_PTT_UNUSED_ENTRY(235),
+ ICE_PTT_UNUSED_ENTRY(236),
+ ICE_PTT_UNUSED_ENTRY(237),
+ ICE_PTT_UNUSED_ENTRY(238),
+ ICE_PTT_UNUSED_ENTRY(239),
+
+ ICE_PTT_UNUSED_ENTRY(240),
+ ICE_PTT_UNUSED_ENTRY(241),
+ ICE_PTT_UNUSED_ENTRY(242),
+ ICE_PTT_UNUSED_ENTRY(243),
+ ICE_PTT_UNUSED_ENTRY(244),
+ ICE_PTT_UNUSED_ENTRY(245),
+ ICE_PTT_UNUSED_ENTRY(246),
+ ICE_PTT_UNUSED_ENTRY(247),
+ ICE_PTT_UNUSED_ENTRY(248),
+ ICE_PTT_UNUSED_ENTRY(249),
+
+ ICE_PTT_UNUSED_ENTRY(250),
+ ICE_PTT_UNUSED_ENTRY(251),
+ ICE_PTT_UNUSED_ENTRY(252),
+ ICE_PTT_UNUSED_ENTRY(253),
+ ICE_PTT_UNUSED_ENTRY(254),
+ ICE_PTT_UNUSED_ENTRY(255),
};
static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 28b46cc9f5cb..f2682776f8c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -127,8 +127,14 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
case ICE_VSI_PF:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
- vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
- vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
+ /* a user could change the values of num_[tr]x_desc using
+ * ethtool -G so we should keep those values instead of
+ * overwriting them with the defaults.
+ */
+ if (!vsi->num_rx_desc)
+ vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
+ if (!vsi->num_tx_desc)
+ vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
break;
default:
dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
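Usage note (illustrative values): with the change above, descriptor counts previously set by the administrator, for example with ethtool -G <iface> rx 2048 tx 2048, now survive a VSI rebuild instead of being overwritten with ICE_DFLT_NUM_RX_DESC/ICE_DFLT_NUM_TX_DESC.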
@@ -1194,7 +1200,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
for (i = 0; i < vsi->alloc_txq; i++) {
if (vsi->tx_rings[i]) {
kfree_rcu(vsi->tx_rings[i], rcu);
- vsi->tx_rings[i] = NULL;
+ WRITE_ONCE(vsi->tx_rings[i], NULL);
}
}
}
@@ -1202,7 +1208,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
for (i = 0; i < vsi->alloc_rxq; i++) {
if (vsi->rx_rings[i]) {
kfree_rcu(vsi->rx_rings[i], rcu);
- vsi->rx_rings[i] = NULL;
+ WRITE_ONCE(vsi->rx_rings[i], NULL);
}
}
}
@@ -1235,7 +1241,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->vsi = vsi;
ring->dev = dev;
ring->count = vsi->num_tx_desc;
- vsi->tx_rings[i] = ring;
+ WRITE_ONCE(vsi->tx_rings[i], ring);
}
/* Allocate Rx rings */
@@ -1254,7 +1260,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->netdev = vsi->netdev;
ring->dev = dev;
ring->count = vsi->num_rx_desc;
- vsi->rx_rings[i] = ring;
+ WRITE_ONCE(vsi->rx_rings[i], ring);
}
return 0;
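The WRITE_ONCE() conversions above publish and clear the ring pointers as single stores so that lockless readers using READ_ONCE() (such as statistics collection) never observe a torn value. A self-contained sketch of the pattern with hypothetical names:

	#include <linux/compiler.h>

	struct foo;

	static struct foo *slot;	/* hypothetical shared pointer slot */

	static void publish(struct foo *f)
	{
		WRITE_ONCE(slot, f);	/* paired with READ_ONCE() in the reader */
	}

	static struct foo *peek(void)
	{
		return READ_ONCE(slot);
	}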
@@ -1468,6 +1474,30 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
}
/**
+ * ice_pf_state_is_nominal - checks the PF for nominal state
+ * @pf: pointer to PF to check
+ *
+ * Check the PF's state for a collection of bits that would indicate
+ * the PF is in a state that would inhibit normal operation for
+ * driver functionality.
+ *
+ * Returns true if PF is in a nominal state, false otherwise
+ */
+bool ice_pf_state_is_nominal(struct ice_pf *pf)
+{
+ DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
+
+ if (!pf)
+ return false;
+
+ bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
+ if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
+ return false;
+
+ return true;
+}
+
+/**
* ice_update_eth_stats - Update VSI-specific ethernet statistics counters
* @vsi: the VSI to be updated
*/
@@ -1667,7 +1697,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
u16 q_idx = 0;
int err = 0;
- qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
+ qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
@@ -1987,6 +2017,13 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
if (!vsi)
return -EINVAL;
+ /* Don't enable VLAN pruning if the netdev is currently in promiscuous
+ * mode. VLAN pruning will be enabled when the interface exits
+ * promiscuous mode if any VLAN filters are active.
+ */
+ if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena)
+ return 0;
+
pf = vsi->back;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index d80e6afa4511..981f3a156c24 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -8,6 +8,8 @@
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
+bool ice_pf_state_is_nominal(struct ice_pf *pf);
+
void ice_update_eth_stats(struct ice_vsi *vsi);
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 082825e3cb39..8437d72795b0 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <generated/utsrelease.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
@@ -13,15 +14,7 @@
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
-#define DRV_VERSION_MAJOR 0
-#define DRV_VERSION_MINOR 8
-#define DRV_VERSION_BUILD 2
-
-#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
- __stringify(DRV_VERSION_MINOR) "." \
- __stringify(DRV_VERSION_BUILD) "-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
-const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
@@ -32,7 +25,6 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
static int debug = -1;
@@ -377,6 +369,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
~IFF_PROMISC;
goto out_promisc;
}
+ ice_cfg_vlan_pruning(vsi, false, false);
}
} else {
/* Clear Rx filter to remove traffic from wire */
@@ -389,6 +382,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
IFF_PROMISC;
goto out_promisc;
}
+ if (vsi->num_vlan > 1)
+ ice_cfg_vlan_pruning(vsi, true, false);
}
}
}
@@ -620,6 +615,7 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi)
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
struct ice_aqc_get_phy_caps_data *caps;
+ const char *an_advertised;
enum ice_status status;
const char *fec_req;
const char *speed;
@@ -718,6 +714,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
caps = kzalloc(sizeof(*caps), GFP_KERNEL);
if (!caps) {
fec_req = "Unknown";
+ an_advertised = "Unknown";
goto done;
}
@@ -726,6 +723,8 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
if (status)
netdev_info(vsi->netdev, "Get phy capability failed.\n");
+ an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
+
if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
fec_req = "RS-FEC";
@@ -738,8 +737,8 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
kfree(caps);
done:
- netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
- speed, fec_req, fec, an, fc);
+ netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
+ speed, fec_req, fec, an_advertised, an, fc);
ice_print_topo_conflict(vsi);
}
@@ -771,6 +770,100 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
}
/**
+ * ice_set_dflt_mib - send a default config MIB to the FW
+ * @pf: private PF struct
+ *
+ * This function sends a default configuration MIB to the FW.
+ *
+ * If this function fails at any point, the driver can still function. The
+ * main impact is that link flow control (LFC) may not operate as expected.
+ * Errors are therefore reported only with a debug message so the driver
+ * rebuild/re-enable can continue.
+ */
+static void ice_set_dflt_mib(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ u8 mib_type, *buf, *lldpmib = NULL;
+ u16 len, typelen, offset = 0;
+ struct ice_lldp_org_tlv *tlv;
+ struct ice_hw *hw;
+ u32 ouisubtype;
+
+ if (!pf) {
+ dev_dbg(dev, "%s NULL pf pointer\n", __func__);
+ return;
+ }
+
+ hw = &pf->hw;
+ mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
+ lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
+ if (!lldpmib) {
+ dev_dbg(dev, "%s Failed to allocate MIB memory\n",
+ __func__);
+ return;
+ }
+
+ /* Add ETS CFG TLV */
+ tlv = (struct ice_lldp_org_tlv *)lldpmib;
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_ETS_TLV_LEN);
+ tlv->typelen = htons(typelen);
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ buf = tlv->tlvinfo;
+ buf[0] = 0;
+
+ /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
+ * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
+ * Octets 13 - 20 are TSA values - leave as zeros
+ */
+ buf[5] = 0x64;
+ len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ offset += len + 2;
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+
+ /* Add ETS REC TLV */
+ buf = tlv->tlvinfo;
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* First octet of buf is reserved
+ * Octets 1 - 4 map UP to TC - all UPs map to zero
+ * Octets 5 - 12 are BW values - set TC 0 to 100%.
+ * Octets 13 - 20 are TSA value - leave as zeros
+ */
+ buf[5] = 0x64;
+ offset += len + 2;
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+
+ /* Add PFC CFG TLV */
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_PFC_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* Octet 1 left as all zeros - PFC disabled */
+ buf[0] = 0x08;
+ len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ offset += len + 2;
+
+ if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
+ dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
+
+ kfree(lldpmib);
+}
+
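The three TLVs above share the same 16-bit "typelen" header word: a 7-bit TLV type in the upper bits and a 9-bit payload length in the lower bits, byte-swapped with htons() before being written into the buffer. A minimal standalone sketch of that packing follows; the shift, mask and length values mirror the IEEE 802.1AB/802.1Qaz layout and are assumed to match the driver's ICE_LLDP_TLV_* and ICE_IEEE_ETS_TLV_LEN definitions rather than copied from them.

#include <stdint.h>
#include <stdio.h>

#define LLDP_TLV_TYPE_S   9       /* TLV type lives in bits 15:9 */
#define LLDP_TLV_LEN_M    0x01FF  /* TLV length lives in bits 8:0 */
#define LLDP_TLV_TYPE_ORG 127     /* organizationally specific TLV */

int main(void)
{
	uint16_t ets_cfg_len = 25; /* assumed value of ICE_IEEE_ETS_TLV_LEN */
	uint16_t typelen = (uint16_t)((LLDP_TLV_TYPE_ORG << LLDP_TLV_TYPE_S) |
				      (ets_cfg_len & LLDP_TLV_LEN_M));

	/* the driver advances "offset" by len + 2: payload plus header word */
	printf("typelen=0x%04x type=%u len=%u next_offset=%u\n", typelen,
	       typelen >> LLDP_TLV_TYPE_S, typelen & LLDP_TLV_LEN_M,
	       (typelen & LLDP_TLV_LEN_M) + 2);
	return 0;
}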
+/**
* ice_link_event - process the link event
* @pf: PF that the link event is associated with
* @pi: port_info for the port that the link event is associated with
@@ -804,9 +897,11 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
pi->lport);
- /* if the old link up/down and speed is the same as the new */
- if (link_up == old_link && link_speed == old_link_speed)
- return result;
+ /* Check if the link state is up after updating link info, and treat
+ * this event as an UP event since the link is actually UP now.
+ */
+ if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
+ link_up = true;
vsi = ice_get_main_vsi(pf);
if (!vsi || !vsi->port_info)
@@ -825,7 +920,17 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
}
}
- ice_dcb_rebuild(pf);
+ /* if the old link up/down and speed is the same as the new */
+ if (link_up == old_link && link_speed == old_link_speed)
+ return result;
+
+ if (ice_is_dcb_active(pf)) {
+ if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+ ice_dcb_rebuild(pf);
+ } else {
+ if (link_up)
+ ice_set_dflt_mib(pf);
+ }
ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up);
@@ -918,6 +1023,151 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
return status;
}
+enum ice_aq_task_state {
+ ICE_AQ_TASK_WAITING = 0,
+ ICE_AQ_TASK_COMPLETE,
+ ICE_AQ_TASK_CANCELED,
+};
+
+struct ice_aq_task {
+ struct hlist_node entry;
+
+ u16 opcode;
+ struct ice_rq_event_info *event;
+ enum ice_aq_task_state state;
+};
+
+/**
+ * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
+ * @pf: pointer to the PF private structure
+ * @opcode: the opcode to wait for
+ * @timeout: how long to wait, in jiffies
+ * @event: storage for the event info
+ *
+ * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
+ * current thread will be put to sleep until the specified event occurs or
+ * until the given timeout is reached.
+ *
+ * To obtain only the descriptor contents, pass an event without an allocated
+ * msg_buf. If the complete data buffer is desired, allocate the
+ * event->msg_buf with enough space ahead of time.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
+ struct ice_rq_event_info *event)
+{
+ struct ice_aq_task *task;
+ long ret;
+ int err;
+
+ task = kzalloc(sizeof(*task), GFP_KERNEL);
+ if (!task)
+ return -ENOMEM;
+
+ INIT_HLIST_NODE(&task->entry);
+ task->opcode = opcode;
+ task->event = event;
+ task->state = ICE_AQ_TASK_WAITING;
+
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_add_head(&task->entry, &pf->aq_wait_list);
+ spin_unlock_bh(&pf->aq_wait_lock);
+
+ ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
+ timeout);
+ switch (task->state) {
+ case ICE_AQ_TASK_WAITING:
+ err = ret < 0 ? ret : -ETIMEDOUT;
+ break;
+ case ICE_AQ_TASK_CANCELED:
+ err = ret < 0 ? ret : -ECANCELED;
+ break;
+ case ICE_AQ_TASK_COMPLETE:
+ err = ret < 0 ? ret : 0;
+ break;
+ default:
+ WARN(1, "Unexpected AdminQ wait task state %u", task->state);
+ err = -EINVAL;
+ break;
+ }
+
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_del(&task->entry);
+ spin_unlock_bh(&pf->aq_wait_lock);
+ kfree(task);
+
+ return err;
+}
+
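A rough usage sketch of the new helper, not part of this patch: a caller inside the driver that has just issued a firmware command (an NVM write is assumed here) and wants to block until the matching AdminQ completion arrives. ICE_AQ_MAX_BUF_LEN is assumed to be the control queue buffer size constant, and the 3 second timeout is arbitrary.

	struct ice_rq_event_info event = { 0 };
	int err;

	/* Optional: allocate a buffer if the full response payload is wanted;
	 * without it only the descriptor and msg_len are copied back.
	 */
	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	/* ... issue the firmware command to wait on, e.g. an NVM write ... */

	err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, 3 * HZ, &event);
	if (err)
		dev_dbg(ice_pf_to_dev(pf),
			"NVM write completion not seen: %d\n", err);

	kfree(event.msg_buf);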
+/**
+ * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
+ * @pf: pointer to the PF private structure
+ * @opcode: the opcode of the event
+ * @event: the event to check
+ *
+ * Loops over the current list of pending threads waiting for an AdminQ event.
+ * For each matching task, copy the contents of the event into the task
+ * structure and wake up the thread.
+ *
+ * If multiple threads wait for the same opcode, they will all be woken up.
+ *
+ * Note that event->msg_buf will only be duplicated if the event has a buffer
+ * with enough space already allocated. Otherwise, only the descriptor and
+ * message length will be copied.
+ */
+static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
+ struct ice_rq_event_info *event)
+{
+ struct ice_aq_task *task;
+ bool found = false;
+
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
+ if (task->state || task->opcode != opcode)
+ continue;
+
+ memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
+ task->event->msg_len = event->msg_len;
+
+ /* Only copy the data buffer if a destination was set */
+ if (task->event->msg_buf &&
+ task->event->buf_len > event->buf_len) {
+ memcpy(task->event->msg_buf, event->msg_buf,
+ event->buf_len);
+ task->event->buf_len = event->buf_len;
+ }
+
+ task->state = ICE_AQ_TASK_COMPLETE;
+ found = true;
+ }
+ spin_unlock_bh(&pf->aq_wait_lock);
+
+ if (found)
+ wake_up(&pf->aq_wait_queue);
+}
+
+/**
+ * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
+ * @pf: the PF private structure
+ *
+ * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
+ * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
+ */
+static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
+{
+ struct ice_aq_task *task;
+
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_for_each_entry(task, &pf->aq_wait_list, entry)
+ task->state = ICE_AQ_TASK_CANCELED;
+ spin_unlock_bh(&pf->aq_wait_lock);
+
+ wake_up(&pf->aq_wait_queue);
+}
+
/**
* __ice_clean_ctrlq - helper function to clean controlq rings
* @pf: ptr to struct ice_pf
@@ -1014,6 +1264,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
opcode = le16_to_cpu(event.desc.opcode);
+ /* Notify any thread that might be waiting for this event */
+ ice_aq_check_events(pf, opcode, &event);
+
switch (opcode) {
case ice_aqc_opc_get_link_status:
if (ice_handle_link_event(pf, &event))
@@ -1137,10 +1390,15 @@ static void ice_service_task_complete(struct ice_pf *pf)
/**
* ice_service_task_stop - stop service task and cancel works
* @pf: board private structure
+ *
+ * Return 0 if the __ICE_SERVICE_DIS bit was not already set,
+ * 1 otherwise.
*/
-static void ice_service_task_stop(struct ice_pf *pf)
+static int ice_service_task_stop(struct ice_pf *pf)
{
- set_bit(__ICE_SERVICE_DIS, pf->state);
+ int ret;
+
+ ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state);
if (pf->serv_tmr.function)
del_timer_sync(&pf->serv_tmr);
@@ -1148,6 +1406,7 @@ static void ice_service_task_stop(struct ice_pf *pf)
cancel_work_sync(&pf->serv_task);
clear_bit(__ICE_SERVICE_SCHED, pf->state);
+ return ret;
}
/**
@@ -1382,25 +1641,23 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
goto out;
- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ /* Use the current user PHY configuration. The current user PHY
+ * configuration is initialized during probe from PHY capabilities
+ * software mode, and updated on set PHY configuration.
+ */
+ cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
retcode = -ENOMEM;
goto out;
}
- cfg->phy_type_low = pcaps->phy_type_low;
- cfg->phy_type_high = pcaps->phy_type_high;
- cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
- cfg->low_power_ctrl = pcaps->low_power_ctrl;
- cfg->eee_cap = pcaps->eee_cap;
- cfg->eeer_value = pcaps->eeer_value;
- cfg->link_fec_opt = pcaps->link_fec_options;
+ cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
if (link_up)
cfg->caps |= ICE_AQ_PHY_ENA_LINK;
else
cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
- retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
+ retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
if (retcode) {
dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
vsi->vsi_num, retcode);
@@ -1414,8 +1671,312 @@ out:
}
/**
- * ice_check_media_subtask - Check for media; bring link up if detected.
+ * ice_init_nvm_phy_type - Initialize the NVM PHY type
+ * @pi: port info structure
+ *
+ * Initialize nvm_phy_type_[low|high] for link lenient mode support
+ */
+static int ice_init_nvm_phy_type(struct ice_port_info *pi)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_pf *pf = pi->hw->back;
+ enum ice_status status;
+ int err = 0;
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps,
+ NULL);
+
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
+ err = -EIO;
+ goto out;
+ }
+
+ pf->nvm_phy_type_hi = pcaps->phy_type_high;
+ pf->nvm_phy_type_lo = pcaps->phy_type_low;
+
+out:
+ kfree(pcaps);
+ return err;
+}
+
+/**
+ * ice_init_link_dflt_override - Initialize link default override
+ * @pi: port info structure
+ *
+ * Initialize link default override and PHY total port shutdown during probe
+ */
+static void ice_init_link_dflt_override(struct ice_port_info *pi)
+{
+ struct ice_link_default_override_tlv *ldo;
+ struct ice_pf *pf = pi->hw->back;
+
+ ldo = &pf->link_dflt_override;
+ if (ice_get_link_default_override(ldo, pi))
+ return;
+
+ if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
+ return;
+
+ /* Enable Total Port Shutdown (override/replace link-down-on-close
+ * ethtool private flag) for ports with Port Disable bit set.
+ */
+ set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
+ set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
+}
+
+/**
+ * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
+ * @pi: port info structure
+ *
+ * If default override is enabled, initialize the user PHY cfg speed and FEC
+ * settings using the default override mask from the NVM.
+ *
+ * The PHY should only be configured with the default override settings the
+ * first time media is available. The __ICE_LINK_DEFAULT_OVERRIDE_PENDING state
+ * is used to indicate that the user PHY cfg default override is initialized
+ * and the PHY has not been configured with the default override settings. The
+ * state is set here, and cleared in ice_configure_phy the first time the PHY is
+ * configured.
+ */
+static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
+{
+ struct ice_link_default_override_tlv *ldo;
+ struct ice_aqc_set_phy_cfg_data *cfg;
+ struct ice_phy_info *phy = &pi->phy;
+ struct ice_pf *pf = pi->hw->back;
+
+ ldo = &pf->link_dflt_override;
+
+ /* If link default override is enabled, use it to mask the NVM PHY
+ * capabilities for the speed and FEC default configuration.
+ */
+ cfg = &phy->curr_user_phy_cfg;
+
+ if (ldo->phy_type_low || ldo->phy_type_high) {
+ cfg->phy_type_low = pf->nvm_phy_type_lo &
+ cpu_to_le64(ldo->phy_type_low);
+ cfg->phy_type_high = pf->nvm_phy_type_hi &
+ cpu_to_le64(ldo->phy_type_high);
+ }
+ cfg->link_fec_opt = ldo->fec_options;
+ phy->curr_user_fec_req = ICE_FEC_AUTO;
+
+ set_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
+}
+
+/**
+ * ice_init_phy_user_cfg - Initialize the PHY user configuration
+ * @pi: port info structure
+ *
+ * Initialize the current user PHY configuration, speed, FEC, and FC requested
+ * mode to default. The PHY defaults are from get PHY capabilities topology
+ * with media so call when media is first available. An error is returned if
+ * called when media is not available. The PHY initialization completed state is
+ * set here.
+ *
+ * These configurations are used when setting PHY
+ * configuration. The user PHY configuration is updated on set PHY
+ * configuration. Returns 0 on success, negative on failure
+ */
+static int ice_init_phy_user_cfg(struct ice_port_info *pi)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_phy_info *phy = &pi->phy;
+ struct ice_pf *pf = pi->hw->back;
+ enum ice_status status;
+ struct ice_vsi *vsi;
+ int err = 0;
+
+ if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
+ return -EIO;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return -EINVAL;
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
+ NULL);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
+ err = -EIO;
+ goto err_out;
+ }
+
+ ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
+
+ /* check if lenient mode is supported and enabled */
+ if (ice_fw_supports_link_override(&vsi->back->hw) &&
+ !(pcaps->module_compliance_enforcement &
+ ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
+ set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
+
+ /* if link default override is enabled, initialize user PHY
+ * configuration with link default override values
+ */
+ if (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN) {
+ ice_init_phy_cfg_dflt_override(pi);
+ goto out;
+ }
+ }
+
+ /* if link default override is not enabled, initialize PHY using
+ * topology with media
+ */
+ phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
+ pcaps->link_fec_options);
+ phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
+
+out:
+ phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
+ set_bit(__ICE_PHY_INIT_COMPLETE, pf->state);
+err_out:
+ kfree(pcaps);
+ return err;
+}
+
+/**
+ * ice_configure_phy - configure PHY
+ * @vsi: VSI of PHY
+ *
+ * Set the PHY configuration. If the current PHY configuration is the same as
+ * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
+ * configure based on the PHY capabilities reported for the topology with
+ * media.
+ */
+static int ice_configure_phy(struct ice_vsi *vsi)
+{
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_set_phy_cfg_data *cfg;
+ struct ice_port_info *pi;
+ enum ice_status status;
+ int err = 0;
+
+ pi = vsi->port_info;
+ if (!pi)
+ return -EINVAL;
+
+ /* Ensure we have media as we cannot configure a medialess port */
+ if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
+ return -EPERM;
+
+ ice_print_topo_conflict(vsi);
+
+ if (vsi->port_info->phy.link_info.topo_media_conflict ==
+ ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
+ return -EPERM;
+
+ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
+ return ice_force_phys_link_state(vsi, true);
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ /* Get current PHY config */
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (status) {
+ dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
+ err = -EIO;
+ goto done;
+ }
+
+ /* If PHY enable link is configured and configuration has not changed,
+ * there's nothing to do
+ */
+ if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
+ ice_phy_caps_equals_cfg(pcaps, &pi->phy.curr_user_phy_cfg))
+ goto done;
+
+ /* Use PHY topology as baseline for configuration */
+ memset(pcaps, 0, sizeof(*pcaps));
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
+ NULL);
+ if (status) {
+ dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
+ err = -EIO;
+ goto done;
+ }
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
+
+ /* Speed - If default override pending, use curr_user_phy_cfg set in
+ * ice_init_phy_user_cfg_ldo.
+ */
+ if (test_and_clear_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING,
+ vsi->back->state)) {
+ cfg->phy_type_low = pi->phy.curr_user_phy_cfg.phy_type_low;
+ cfg->phy_type_high = pi->phy.curr_user_phy_cfg.phy_type_high;
+ } else {
+ u64 phy_low = 0, phy_high = 0;
+
+ ice_update_phy_type(&phy_low, &phy_high,
+ pi->phy.curr_user_speed_req);
+ cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
+ cfg->phy_type_high = pcaps->phy_type_high &
+ cpu_to_le64(phy_high);
+ }
+
+ /* Can't provide what was requested; use PHY capabilities */
+ if (!cfg->phy_type_low && !cfg->phy_type_high) {
+ cfg->phy_type_low = pcaps->phy_type_low;
+ cfg->phy_type_high = pcaps->phy_type_high;
+ }
+
+ /* FEC */
+ ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req);
+
+ /* Can't provide what was requested; use PHY capabilities */
+ if (cfg->link_fec_opt !=
+ (cfg->link_fec_opt & pcaps->link_fec_options)) {
+ cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
+ cfg->link_fec_opt = pcaps->link_fec_options;
+ }
+
+ /* Flow Control - always supported; no need to check against
+ * capabilities
+ */
+ ice_cfg_phy_fc(pi, cfg, pi->phy.curr_user_fc_req);
+
+ /* Enable link and link update */
+ cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
+
+ status = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
+ if (status) {
+ dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
+ err = -EIO;
+ }
+
+ kfree(cfg);
+done:
+ kfree(pcaps);
+ return err;
+}
+
+/**
+ * ice_check_media_subtask - Check for media
* @pf: pointer to PF struct
+ *
+ * If media is available, initialize the PHY user configuration if it has not
+ * been done yet, and configure the PHY if the interface is up.
*/
static void ice_check_media_subtask(struct ice_pf *pf)
{
@@ -1423,15 +1984,12 @@ static void ice_check_media_subtask(struct ice_pf *pf)
struct ice_vsi *vsi;
int err;
- vsi = ice_get_main_vsi(pf);
- if (!vsi)
+ /* No need to check for media if it's already present */
+ if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
return;
- /* No need to check for media if it's already present or the interface
- * is down
- */
- if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) ||
- test_bit(__ICE_DOWN, vsi->state))
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
return;
/* Refresh link info and check if media is present */
@@ -1441,10 +1999,19 @@ static void ice_check_media_subtask(struct ice_pf *pf)
return;
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
- err = ice_force_phys_link_state(vsi, true);
- if (err)
+ if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state))
+ ice_init_phy_user_cfg(pi);
+
+ /* PHY settings are reset on media insertion, reconfigure
+ * PHY to preserve settings.
+ */
+ if (test_bit(__ICE_DOWN, vsi->state) &&
+ test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
return;
- clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+
+ err = ice_configure_phy(vsi);
+ if (!err)
+ clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
/* A Link Status Event will be generated; the event handler
* will complete bringing the interface up
@@ -1702,7 +2269,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
xdp_ring->netdev = NULL;
xdp_ring->dev = dev;
xdp_ring->count = vsi->num_tx_desc;
- vsi->xdp_rings[i] = xdp_ring;
+ WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
@@ -1982,9 +2549,6 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
- case XDP_QUERY_PROG:
- xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
- return 0;
case XDP_SETUP_XSK_UMEM:
return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
xdp->xsk.queue_id);
@@ -2779,6 +3343,10 @@ static int ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
+ INIT_HLIST_HEAD(&pf->aq_wait_list);
+ spin_lock_init(&pf->aq_wait_lock);
+ init_waitqueue_head(&pf->aq_wait_queue);
+
/* setup service timer and periodic service task */
timer_setup(&pf->serv_tmr, ice_service_timer, 0);
pf->serv_tmr_period = HZ;
@@ -2949,6 +3517,27 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/**
+ * ice_is_wol_supported - get NVM state of WoL
+ * @pf: board private structure
+ *
+ * Check if WoL is supported based on the HW configuration.
+ * Returns true if NVM supports and enables WoL for this port, false otherwise
+ */
+bool ice_is_wol_supported(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u16 wol_ctrl;
+
+ /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
+ * word) indicates WoL is not supported on the corresponding PF ID.
+ */
+ if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
+ return false;
+
+ return !(BIT(hw->pf_id) & wol_ctrl);
+}
+
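The WoL control word uses inverted per-PF semantics: a bit set to 1 disables WoL for that PF, so a cleared bit means supported. A tiny standalone illustration of the same check (the word value and PF IDs below are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool wol_supported(uint16_t wol_ctrl, unsigned int pf_id)
{
	/* bit set => WoL NOT supported on that PF, mirroring the driver */
	return !(wol_ctrl & (1u << pf_id));
}

int main(void)
{
	uint16_t wol_ctrl = 0x0004; /* example NVM word: only PF 2 disabled */

	printf("PF0 supported: %d, PF2 supported: %d\n",
	       wol_supported(wol_ctrl, 0), wol_supported(wol_ctrl, 2));
	return 0;
}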
+/**
* ice_vsi_recfg_qs - Change the number of queues on a VSI
* @vsi: VSI being changed
* @new_rx: new number of Rx queues
@@ -2995,6 +3584,60 @@ done:
}
/**
+ * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
+ * @pf: PF to configure
+ *
+ * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
+ * VSI can still Tx/Rx VLAN tagged packets.
+ */
+static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct ice_vsi_ctx *ctxt;
+ enum ice_status status;
+ struct ice_hw *hw;
+
+ if (!vsi)
+ return;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return;
+
+ hw = &pf->hw;
+ ctxt->info = vsi->info;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SECURITY_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ /* disable VLAN anti-spoof */
+ ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+
+ /* disable VLAN pruning and keep all other settings */
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ /* allow all VLANs on Tx and don't strip on Rx */
+ ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
+ ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+
+ status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (status) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ } else {
+ vsi->info.sec_flags = ctxt->info.sec_flags;
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+ vsi->info.vlan_flags = ctxt->info.vlan_flags;
+ }
+
+ kfree(ctxt);
+}
+
+/**
* ice_log_pkg_init - log result of DDP package load
* @hw: pointer to hardware info
* @status: status of package load
@@ -3168,11 +3811,11 @@ static enum ice_status ice_send_version(struct ice_pf *pf)
{
struct ice_driver_ver dv;
- dv.major_ver = DRV_VERSION_MAJOR;
- dv.minor_ver = DRV_VERSION_MINOR;
- dv.build_ver = DRV_VERSION_BUILD;
+ dv.major_ver = 0xff;
+ dv.minor_ver = 0xff;
+ dv.build_ver = 0xff;
dv.subbuild_ver = 0;
- strscpy((char *)dv.driver_string, DRV_VERSION,
+ strscpy((char *)dv.driver_string, UTS_RELEASE,
sizeof(dv.driver_string));
return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
}
@@ -3296,6 +3939,33 @@ dflt_pkg_load:
}
/**
+ * ice_print_wake_reason - show the wake up cause in the log
+ * @pf: pointer to the PF struct
+ */
+static void ice_print_wake_reason(struct ice_pf *pf)
+{
+ u32 wus = pf->wakeup_reason;
+ const char *wake_str;
+
+ /* if no wake event, nothing to print */
+ if (!wus)
+ return;
+
+ if (wus & PFPM_WUS_LNKC_M)
+ wake_str = "Link\n";
+ else if (wus & PFPM_WUS_MAG_M)
+ wake_str = "Magic Packet\n";
+ else if (wus & PFPM_WUS_MNG_M)
+ wake_str = "Management\n";
+ else if (wus & PFPM_WUS_FW_RST_WK_M)
+ wake_str = "Firmware Reset\n";
+ else
+ wake_str = "Unknown\n";
+
+ dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
+}
+
+/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in ice_pci_tbl
@@ -3463,8 +4133,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_send_version(pf);
if (err) {
dev_err(dev, "probe failed sending driver version %s. error: %d\n",
- ice_drv_ver, err);
- goto err_alloc_sw_unroll;
+ UTS_RELEASE, err);
+ goto err_send_version_unroll;
}
/* since everything is good, start the service timer */
@@ -3473,14 +4143,60 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_init_link_events(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_init_link_events failed: %d\n", err);
- goto err_alloc_sw_unroll;
+ goto err_send_version_unroll;
+ }
+
+ err = ice_init_nvm_phy_type(pf->hw.port_info);
+ if (err) {
+ dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
+ goto err_send_version_unroll;
+ }
+
+ err = ice_update_link_info(pf->hw.port_info);
+ if (err) {
+ dev_err(dev, "ice_update_link_info failed: %d\n", err);
+ goto err_send_version_unroll;
+ }
+
+ ice_init_link_dflt_override(pf->hw.port_info);
+
+ /* if media available, initialize PHY settings */
+ if (pf->hw.port_info->phy.link_info.link_info &
+ ICE_AQ_MEDIA_AVAILABLE) {
+ err = ice_init_phy_user_cfg(pf->hw.port_info);
+ if (err) {
+ dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
+ goto err_send_version_unroll;
+ }
+
+ if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+ if (vsi)
+ ice_configure_phy(vsi);
+ }
+ } else {
+ set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
}
ice_verify_cacheline_size(pf);
- /* If no DDP driven features have to be setup, we are done with probe */
- if (ice_is_safe_mode(pf))
+ /* Save wakeup reason register for later use */
+ pf->wakeup_reason = rd32(hw, PFPM_WUS);
+
+ /* check for a power management event */
+ ice_print_wake_reason(pf);
+
+ /* clear wake status, all bits */
+ wr32(hw, PFPM_WUS, U32_MAX);
+
+ /* Disable WoL at init, wait for user to enable */
+ device_set_wakeup_enable(dev, false);
+
+ if (ice_is_safe_mode(pf)) {
+ ice_set_safe_mode_vlan_cfg(pf);
goto probe_done;
+ }
/* initialize DDP driven features */
@@ -3504,6 +4220,8 @@ probe_done:
clear_bit(__ICE_DOWN, pf->state);
return 0;
+err_send_version_unroll:
+ ice_vsi_release_all(pf);
err_alloc_sw_unroll:
ice_devlink_destroy_port(pf);
set_bit(__ICE_SERVICE_DIS, pf->state);
@@ -3522,10 +4240,73 @@ err_init_pf_unroll:
err_exit_unroll:
ice_devlink_unregister(pf);
pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
return err;
}
/**
+ * ice_set_wake - enable or disable Wake on LAN
+ * @pf: pointer to the PF struct
+ *
+ * Simple helper for WoL control
+ */
+static void ice_set_wake(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ bool wol = pf->wol_ena;
+
+ /* clear wake state, otherwise new wake events won't fire */
+ wr32(hw, PFPM_WUS, U32_MAX);
+
+ /* enable / disable APM wake up, no RMW needed */
+ wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
+
+ /* set magic packet filter enabled */
+ wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
+}
+
+/**
+ * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
+ * @pf: pointer to the PF struct
+ *
+ * Issue firmware command to enable multicast magic wake, making
+ * sure that any locally administered address (LAA) is used for
+ * wake, and that PF reset doesn't undo the LAA.
+ */
+static void ice_setup_mc_magic_wake(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u8 mac_addr[ETH_ALEN];
+ struct ice_vsi *vsi;
+ u8 flags;
+
+ if (!pf->wol_ena)
+ return;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return;
+
+ /* Get current MAC address in case it's an LAA */
+ if (vsi->netdev)
+ ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
+ else
+ ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
+
+ flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
+ ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
+ ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
+
+ status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
+ if (status)
+ dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+}
+
+/**
* ice_remove - Device removal routine
* @pdev: PCI device information struct
*/
@@ -3551,11 +4332,15 @@ static void ice_remove(struct pci_dev *pdev)
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
+ ice_aq_cancel_waiting_tasks(pf);
+
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
+ ice_setup_mc_magic_wake(pf);
ice_devlink_destroy_port(pf);
ice_vsi_release_all(pf);
+ ice_set_wake(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
if (!pf->vsi[i])
@@ -3575,9 +4360,231 @@ static void ice_remove(struct pci_dev *pdev)
pci_wait_for_pending_transaction(pdev);
ice_clear_interrupt_scheme(pf);
pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
}
/**
+ * ice_shutdown - PCI callback for shutting down device
+ * @pdev: PCI device information struct
+ */
+static void ice_shutdown(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ ice_remove(pdev);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, pf->wol_ena);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+#ifdef CONFIG_PM
+/**
+ * ice_prepare_for_shutdown - prep for PCI shutdown
+ * @pf: board private structure
+ *
+ * Inform or close all dependent features in prep for PCI device shutdown
+ */
+static void ice_prepare_for_shutdown(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u32 v;
+
+ /* Notify VFs of impending reset */
+ if (ice_check_sq_alive(hw, &hw->mailboxq))
+ ice_vc_notify_reset(pf);
+
+ dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
+
+ /* disable the VSIs and their queues that are not already DOWN */
+ ice_pf_dis_all_vsi(pf, false);
+
+ ice_for_each_vsi(pf, v)
+ if (pf->vsi[v])
+ pf->vsi[v]->vsi_num = 0;
+
+ ice_shutdown_all_ctrlq(hw);
+}
+
+/**
+ * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
+ * @pf: board private structure to reinitialize
+ *
+ * This routine reinitializes the interrupt scheme that was cleared during
+ * the power management suspend callback.
+ *
+ * It should be called from the resume routine to re-allocate the q_vectors
+ * and reacquire interrupts.
+ */
+static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int ret, v;
+
+ /* Since we clear MSIX flag during suspend, we need to
+ * set it back during resume...
+ */
+
+ ret = ice_init_interrupt_scheme(pf);
+ if (ret) {
+ dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
+ return ret;
+ }
+
+ /* Remap vectors and rings, after successful re-init interrupts */
+ ice_for_each_vsi(pf, v) {
+ if (!pf->vsi[v])
+ continue;
+
+ ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
+ if (ret)
+ goto err_reinit;
+ ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ }
+
+ ret = ice_req_irq_msix_misc(pf);
+ if (ret) {
+ dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
+ ret);
+ goto err_reinit;
+ }
+
+ return 0;
+
+err_reinit:
+ while (v--)
+ if (pf->vsi[v])
+ ice_vsi_free_q_vectors(pf->vsi[v]);
+
+ return ret;
+}
+
+/**
+ * ice_suspend - PM callback for suspending the device
+ * @dev: generic device information structure
+ *
+ * Power Management callback to quiesce the device and prepare
+ * for D3 transition.
+ */
+static int __maybe_unused ice_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ice_pf *pf;
+ int disabled, v;
+
+ pf = pci_get_drvdata(pdev);
+
+ if (!ice_pf_state_is_nominal(pf)) {
+ dev_err(dev, "Device is not ready, no need to suspend it\n");
+ return -EBUSY;
+ }
+
+ /* Stop watchdog tasks until resume completion.
+ * Even though it is most likely that the service task is
+ * disabled if the device is suspended or down, the service task's
+ * state is controlled by a different state bit, and we should
+ * store and honor whatever state that bit is in at this point.
+ */
+ disabled = ice_service_task_stop(pf);
+
+ /* Already suspended? Then there is nothing to do */
+ if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) {
+ if (!disabled)
+ ice_service_task_restart(pf);
+ return 0;
+ }
+
+ if (test_bit(__ICE_DOWN, pf->state) ||
+ ice_is_reset_in_progress(pf->state)) {
+ dev_err(dev, "can't suspend device in reset or already down\n");
+ if (!disabled)
+ ice_service_task_restart(pf);
+ return 0;
+ }
+
+ ice_setup_mc_magic_wake(pf);
+
+ ice_prepare_for_shutdown(pf);
+
+ ice_set_wake(pf);
+
+ /* Free vectors, clear the interrupt scheme and release IRQs
+ * for proper hibernation, especially with large number of CPUs.
+ * Otherwise hibernation might fail when mapping all the vectors back
+ * to CPU0.
+ */
+ ice_free_irq_msix_misc(pf);
+ ice_for_each_vsi(pf, v) {
+ if (!pf->vsi[v])
+ continue;
+ ice_vsi_free_q_vectors(pf->vsi[v]);
+ }
+ ice_clear_interrupt_scheme(pf);
+
+ pci_wake_from_d3(pdev, pf->wol_ena);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+
+/**
+ * ice_resume - PM callback for waking up from D3
+ * @dev: generic device information structure
+ */
+static int __maybe_unused ice_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ enum ice_reset_req reset_type;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (!pci_device_is_present(pdev))
+ return -ENODEV;
+
+ ret = pci_enable_device_mem(pdev);
+ if (ret) {
+ dev_err(dev, "Cannot enable device after suspend\n");
+ return ret;
+ }
+
+ pf = pci_get_drvdata(pdev);
+ hw = &pf->hw;
+
+ pf->wakeup_reason = rd32(hw, PFPM_WUS);
+ ice_print_wake_reason(pf);
+
+ /* We cleared the interrupt scheme when we suspended, so we need to
+ * restore it now to resume device functionality.
+ */
+ ret = ice_reinit_interrupt_scheme(pf);
+ if (ret)
+ dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
+
+ clear_bit(__ICE_DOWN, pf->state);
+ /* Now perform PF reset and rebuild */
+ reset_type = ICE_RESET_PFR;
+ /* re-enable service task for reset, but allow reset to schedule it */
+ clear_bit(__ICE_SERVICE_DIS, pf->state);
+
+ if (ice_schedule_reset(pf, reset_type))
+ dev_err(dev, "Reset during resume failed.\n");
+
+ clear_bit(__ICE_SUSPENDED, pf->state);
+ ice_service_task_restart(pf);
+
+ /* Restart the service task */
+ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+/**
* ice_pci_err_detected - warning that PCI error has been detected
* @pdev: PCI device information struct
* @err: the type of PCI error
@@ -3673,6 +4680,8 @@ static void ice_pci_err_resume(struct pci_dev *pdev)
return;
}
+ ice_restore_all_vfs_msi_state(pdev);
+
ice_do_reset(pf, ICE_RESET_PFR);
ice_service_task_restart(pf);
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
@@ -3742,6 +4751,8 @@ static const struct pci_device_id ice_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
+static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
+
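For reference, SIMPLE_DEV_PM_OPS() above fills in all of the system-sleep hooks with the same suspend/resume pair; under CONFIG_PM_SLEEP it is roughly equivalent to the hand-written table sketched below (runtime PM callbacks stay unset).

static const struct dev_pm_ops ice_pm_ops = {
	.suspend  = ice_suspend,
	.resume   = ice_resume,
	.freeze   = ice_suspend,
	.thaw     = ice_resume,
	.poweroff = ice_suspend,
	.restore  = ice_resume,
};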
static const struct pci_error_handlers ice_pci_err_handler = {
.error_detected = ice_pci_err_detected,
.slot_reset = ice_pci_err_slot_reset,
@@ -3755,6 +4766,10 @@ static struct pci_driver ice_driver = {
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
+#ifdef CONFIG_PM
+ .driver.pm = &ice_pm_ops,
+#endif /* CONFIG_PM */
+ .shutdown = ice_shutdown,
.sriov_configure = ice_sriov_configure,
.err_handler = &ice_pci_err_handler
};
@@ -3769,7 +4784,7 @@ static int __init ice_module_init(void)
{
int status;
- pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
+ pr_info("%s\n", ice_driver_string);
pr_info("%s\n", ice_copyright);
ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
@@ -4275,6 +5290,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
vsi->tx_linearize = 0;
vsi->rx_buf_failed = 0;
vsi->rx_page_failed = 0;
+ vsi->rx_gro_dropped = 0;
rcu_read_lock();
@@ -4289,6 +5305,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
vsi_stats->rx_bytes += bytes;
vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
+ vsi->rx_gro_dropped += ring->rx_stats.gro_dropped;
}
/* update XDP Tx rings counters */
@@ -4320,7 +5337,7 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
ice_update_eth_stats(vsi);
cur_ns->tx_errors = cur_es->tx_errors;
- cur_ns->rx_dropped = cur_es->rx_discards;
+ cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped;
cur_ns->tx_dropped = cur_es->tx_discards;
cur_ns->multicast = cur_es->rx_multicast;
@@ -4963,10 +5980,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
if (err)
goto err_sched_init_port;
- err = ice_update_link_info(hw->port_info);
- if (err)
- dev_err(dev, "Get link status error %d\n", err);
-
/* start misc vector */
err = ice_req_irq_msix_misc(pf);
if (err) {
@@ -5667,20 +6680,30 @@ int ice_open(struct net_device *netdev)
/* Set PHY if there is media, otherwise, turn off PHY */
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
- err = ice_force_phys_link_state(vsi, true);
+ clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+ if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) {
+ err = ice_init_phy_user_cfg(pi);
+ if (err) {
+ netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
+ err);
+ return err;
+ }
+ }
+
+ err = ice_configure_phy(vsi);
if (err) {
netdev_err(netdev, "Failed to set physical link up, error %d\n",
err);
return err;
}
} else {
+ set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
err = ice_aq_set_link_restart_an(pi, false, NULL);
if (err) {
netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n",
vsi->vsi_num, err);
return err;
}
- set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags);
}
err = ice_vsi_open(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index b049c1c30c88..5903a36763de 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -108,6 +108,76 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
}
/**
+ * ice_aq_update_nvm
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @command_flags: command parameters
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update the NVM using the admin queue commands (0x0703)
+ */
+enum ice_status
+ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command, u8 command_flags,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+ struct ice_aqc_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ /* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);
+
+ cmd->cmd_flags |= command_flags;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = cpu_to_le16(length);
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, data, length, cd);
+}
+
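A rough sketch of how ice_aq_update_nvm() is typically driven, not part of this patch: acquire the NVM resource for writing, stream the image in chunks (the 4 KB chunk size is an assumption), and mark only the final chunk with last_command so firmware knows the write sequence is complete. Variables hw, module, offset, image and len are assumed to come from the caller; error handling is trimmed.

	enum ice_status status;
	u32 written = 0;

	status = ice_acquire_nvm(hw, ICE_RES_WRITE);
	if (status)
		return status;

	while (written < len) {
		u16 chunk = min_t(u32, len - written, 4096);
		bool last = (written + chunk == len);

		status = ice_aq_update_nvm(hw, module, offset + written, chunk,
					   image + written, last, 0, NULL);
		if (status)
			break;
		written += chunk;
	}

	ice_release_nvm(hw);
	return status;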
+/**
+ * ice_aq_erase_nvm
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @cd: pointer to command details structure or NULL
+ *
+ * Erase the NVM sector using the admin queue commands (0x0702)
+ */
+enum ice_status
+ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+ struct ice_aqc_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase);
+
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->length = cpu_to_le16(ICE_AQC_NVM_ERASE_LEN);
+ cmd->offset_low = 0;
+ cmd->offset_high = 0;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
* ice_read_sr_word_aq - Reads Shadow RAM via AQ
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
@@ -172,8 +242,7 @@ void ice_release_nvm(struct ice_hw *hw)
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
*/
-static enum ice_status
-ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
enum ice_status status;
@@ -197,7 +266,7 @@ ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
* Area (PFA) and returns the TLV pointer and length. The caller can
* use these to read the variable length TLV value.
*/
-static enum ice_status
+enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
@@ -635,3 +704,119 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
return status;
}
+
+/**
+ * ice_nvm_write_activate
+ * @hw: pointer to the HW struct
+ * @cmd_flags: NVM activate admin command bits (banks to be validated)
+ *
+ * Update the control word with the required banks' validity bits
+ * and dump the Shadow RAM to flash (0x0707)
+ */
+enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
+{
+ struct ice_aqc_nvm *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.nvm;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
+
+ cmd->cmd_flags = cmd_flags;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_nvm_update_empr
+ * @hw: pointer to the HW struct
+ *
+ * Update empr (0x0709). This command allows SW to
+ * request an EMPR to activate new FW.
+ */
+enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_update_empr);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_nvm_set_pkg_data
+ * @hw: pointer to the HW struct
+ * @del_pkg_data_flag: if set, the current pkg_data stored by FW is deleted;
+ * when this bit is set, the buffer should have size 0
+ * @data: pointer to buffer
+ * @length: length of the buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set package data (0x070A). This command is equivalent to the reception
+ * of a PLDM FW Update GetPackageData cmd. This command should be sent
+ * as part of the NVM update as the first cmd in the flow.
+ */
+enum ice_status
+ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
+ u16 length, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_nvm_pkg_data *cmd;
+ struct ice_aq_desc desc;
+
+ if (length != 0 && !data)
+ return ICE_ERR_PARAM;
+
+ cmd = &desc.params.pkg_data;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_pkg_data);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (del_pkg_data_flag)
+ cmd->cmd_flags |= ICE_AQC_NVM_PKG_DELETE;
+
+ return ice_aq_send_cmd(hw, &desc, data, length, cd);
+}
+
+/**
+ * ice_nvm_pass_component_tbl
+ * @hw: pointer to the HW struct
+ * @data: pointer to buffer
+ * @length: length of the buffer
+ * @transfer_flag: parameter for determining stage of the update
+ * @comp_response: a pointer to the response from the 0x070B AQC.
+ * @comp_response_code: a pointer to the response code from the 0x070B AQC.
+ * @cd: pointer to command details structure or NULL
+ *
+ * Pass component table (0x070B). This command is equivalent to the reception
+ * of a PLDM FW Update PassComponentTable cmd. This command should be sent once
+ * per component. It can be only sent after Set Package Data cmd and before
+ * actual update. FW will assume these commands are going to be sent until
+ * the TransferFlag is set to End or StartAndEnd.
+ */
+enum ice_status
+ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
+ u8 transfer_flag, u8 *comp_response,
+ u8 *comp_response_code, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_nvm_pass_comp_tbl *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (!data || !comp_response || !comp_response_code)
+ return ICE_ERR_PARAM;
+
+ cmd = &desc.params.pass_comp_tbl;
+
+ ice_fill_dflt_direct_cmd_desc(&desc,
+ ice_aqc_opc_nvm_pass_component_tbl);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ cmd->transfer_flag = transfer_flag;
+ status = ice_aq_send_cmd(hw, &desc, data, length, cd);
+
+ if (!status) {
+ *comp_response = cmd->component_response;
+ *comp_response_code = cmd->component_response_code;
+ }
+ return status;
+}
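Taken together, the wrappers added here map onto the PLDM-style update flow the comments describe: package data first (0x070A), one PassComponentTable per component (0x070B), then the actual erase/write (0x0702/0x0703), and finally an activate (0x0707). A condensed sketch of that ordering follows; the buffers, lengths, transfer_flag, module and activate_flags values are placeholders, and real code would also act on the component response codes.

	enum ice_status status;
	u8 comp_resp = 0, comp_resp_code = 0;

	/* 0x070A: hand the PLDM package data to firmware first */
	status = ice_nvm_set_pkg_data(hw, false, pkg_data, pkg_len, NULL);
	if (status)
		return status;

	/* 0x070B: one call per component; the transfer flag tells firmware
	 * whether more components follow
	 */
	status = ice_nvm_pass_component_tbl(hw, comp_tbl, comp_tbl_len,
					    transfer_flag, &comp_resp,
					    &comp_resp_code, NULL);
	if (status)
		return status;

	/* 0x0702 + 0x0703: erase the target module, then write the image */
	status = ice_aq_erase_nvm(hw, module, NULL);
	if (!status)
		status = ice_aq_update_nvm(hw, module, 0, image_len, image,
					   true, 0, NULL);
	if (status)
		return status;

	/* 0x0707: commit the Shadow RAM and mark the written banks valid */
	return ice_nvm_write_activate(hw, activate_flags);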
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index 165eda07b93d..8d430909f846 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -11,6 +11,26 @@ enum ice_status
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
enum ice_status
+ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ u16 module_type);
+enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
enum ice_status ice_init_nvm(struct ice_hw *hw);
+enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
+enum ice_status
+ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command, u8 command_flags,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
+enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
+enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags);
+enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw);
+enum ice_status
+ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
+ u16 length, struct ice_sq_cd *cd);
+enum ice_status
+ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
+ u8 transfer_flag, u8 *comp_response,
+ u8 *comp_response_code, struct ice_sq_cd *cd);
#endif /* _ICE_NVM_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 0475134295e4..44a228530253 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -129,7 +129,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
*/
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_get_elem *buf, u16 buf_size,
+ struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
@@ -149,8 +149,8 @@ enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info)
{
+ struct ice_aqc_txsched_elem_data elem;
struct ice_sched_node *parent;
- struct ice_aqc_get_elem elem;
struct ice_sched_node *node;
enum ice_status status;
struct ice_hw *hw;
@@ -170,7 +170,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
return ICE_ERR_PARAM;
}
- /* query the current node information from FW before additing it
+ /* query the current node information from FW before adding it
* to the SW DB
*/
status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
@@ -195,7 +195,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
node->parent = parent;
node->tx_sched_layer = layer;
parent->children[parent->num_children++] = node;
- memcpy(&node->info, &elem.generic[0], sizeof(node->info));
+ node->info = elem;
return 0;
}
@@ -238,7 +238,7 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
enum ice_status status;
u16 buf_size;
- buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
+ buf_size = struct_size(buf, teid, num_nodes);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
@@ -423,7 +423,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
*/
static enum ice_status
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_conf_elem *buf, u16 buf_size,
+ struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_cfgd, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
@@ -443,8 +443,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
* Suspend scheduling elements (0x0409)
*/
static enum ice_status
-ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_suspend_resume_elem *buf,
+ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
@@ -464,8 +463,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
* resume scheduling elements (0x040A)
*/
static enum ice_status
-ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_suspend_resume_elem *buf,
+ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
@@ -506,9 +504,9 @@ static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
bool suspend)
{
- struct ice_aqc_suspend_resume_elem *buf;
u16 i, buf_size, num_elem_ret = 0;
enum ice_status status;
+ __le32 *buf;
buf_size = sizeof(*buf) * num_nodes;
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
@@ -516,7 +514,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
return ICE_ERR_NO_MEMORY;
for (i = 0; i < num_nodes; i++)
- buf->teid[i] = cpu_to_le32(node_teids[i]);
+ buf[i] = cpu_to_le32(node_teids[i]);
if (suspend)
status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
@@ -580,7 +578,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
/**
* ice_aq_rl_profile - performs a rate limiting task
* @hw: pointer to the HW struct
- * @opcode:opcode for add, query, or remove profile(s)
+ * @opcode: opcode for add, query, or remove profile(s)
* @num_profiles: the number of profiles
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -591,7 +589,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
*/
static enum ice_status
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
- u16 num_profiles, struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
struct ice_aqc_rl_profile *cmd;
@@ -622,13 +620,11 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
*/
static enum ice_status
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
- struct ice_aqc_rl_profile_generic_elem *buf,
- u16 buf_size, u16 *num_profiles_added,
- struct ice_sq_cd *cd)
+ struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
+ u16 *num_profiles_added, struct ice_sq_cd *cd)
{
- return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles,
- num_profiles, buf,
- buf_size, num_profiles_added, cd);
+ return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
+ buf, buf_size, num_profiles_added, cd);
}
/**
@@ -644,13 +640,12 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
*/
static enum ice_status
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
- struct ice_aqc_rl_profile_generic_elem *buf,
- u16 buf_size, u16 *num_profiles_removed,
- struct ice_sq_cd *cd)
+ struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
+ u16 *num_profiles_removed, struct ice_sq_cd *cd)
{
return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
- num_profiles, buf,
- buf_size, num_profiles_removed, cd);
+ num_profiles, buf, buf_size,
+ num_profiles_removed, cd);
}
/**
@@ -666,7 +661,7 @@ static enum ice_status
ice_sched_del_rl_profile(struct ice_hw *hw,
struct ice_aqc_rl_profile_info *rl_info)
{
- struct ice_aqc_rl_profile_generic_elem *buf;
+ struct ice_aqc_rl_profile_elem *buf;
u16 num_profiles_removed;
enum ice_status status;
u16 num_profiles = 1;
@@ -675,8 +670,7 @@ ice_sched_del_rl_profile(struct ice_hw *hw,
return ICE_ERR_IN_USE;
/* Safe to remove profile ID */
- buf = (struct ice_aqc_rl_profile_generic_elem *)
- &rl_info->profile;
+ buf = &rl_info->profile;
status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
&num_profiles_removed, NULL);
if (status || num_profiles_removed != num_profiles)
@@ -831,7 +825,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
size_t buf_size;
u32 teid;
- buf_size = struct_size(buf, generic, num_nodes - 1);
+ buf_size = struct_size(buf, generic, num_nodes);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
@@ -1282,6 +1276,53 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
}
/**
+ * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
+ * @pi: port information structure
+ * @vsi_node: software VSI handle
+ * @qgrp_node: first queue group node identified for scanning
+ * @owner: LAN or RDMA
+ *
+ * This function retrieves a free LAN or RDMA queue group node by scanning
+ * qgrp_node and its siblings for the queue group with the fewest number
+ * of queues currently assigned.
+ */
+static struct ice_sched_node *
+ice_sched_get_free_qgrp(struct ice_port_info *pi,
+ struct ice_sched_node *vsi_node,
+ struct ice_sched_node *qgrp_node, u8 owner)
+{
+ struct ice_sched_node *min_qgrp;
+ u8 min_children;
+
+ if (!qgrp_node)
+ return qgrp_node;
+ min_children = qgrp_node->num_children;
+ if (!min_children)
+ return qgrp_node;
+ min_qgrp = qgrp_node;
+ /* scan all queue groups until a node is found which has fewer than the
+ * minimum number of children. This way all queue group nodes get an
+ * equal share of queues, and bandwidth is distributed equally across
+ * all queues.
+ */
+ while (qgrp_node) {
+ /* make sure the qgroup node is part of the VSI subtree */
+ if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
+ if (qgrp_node->num_children < min_children &&
+ qgrp_node->owner == owner) {
+ /* replace the new min queue group node */
+ min_qgrp = qgrp_node;
+ min_children = min_qgrp->num_children;
+ /* break if it has no children */
+ if (!min_children)
+ break;
+ }
+ qgrp_node = qgrp_node->sibling;
+ }
+ return min_qgrp;
+}
+
+/**
* ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -1294,7 +1335,7 @@ struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner)
{
- struct ice_sched_node *vsi_node, *qgrp_node = NULL;
+ struct ice_sched_node *vsi_node, *qgrp_node;
struct ice_vsi_ctx *vsi_ctx;
u16 max_children;
u8 qgrp_layer;
@@ -1308,7 +1349,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
vsi_node = vsi_ctx->sched.vsi_node[tc];
/* validate invalid VSI ID */
if (!vsi_node)
- goto lan_q_exit;
+ return NULL;
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
@@ -1321,8 +1362,8 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
qgrp_node = qgrp_node->sibling;
}
-lan_q_exit:
- return qgrp_node;
+ /* Select the best queue group */
+ return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
}
/**
@@ -1867,7 +1908,7 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
* @node: pointer to node
* @info: node info to update
*
- * It updates the HW DB, and local SW DB of node. It updates the scheduling
+ * Update the HW DB, and local SW DB of node. Update the scheduling
 * parameters of the node from the argument info data buffer (info->data buf)
 * and return success or an error on config sched element failure. The caller
 * needs to hold the scheduler lock.
@@ -1876,18 +1917,18 @@ static enum ice_status
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_aqc_txsched_elem_data *info)
{
- struct ice_aqc_conf_elem buf;
+ struct ice_aqc_txsched_elem_data buf;
enum ice_status status;
u16 elem_cfgd = 0;
u16 num_elems = 1;
- buf.generic[0] = *info;
+ buf = *info;
/* Parent TEID is reserved field in this aq call */
- buf.generic[0].parent_teid = 0;
+ buf.parent_teid = 0;
/* Element type is reserved field in this aq call */
- buf.generic[0].data.elem_type = 0;
+ buf.data.elem_type = 0;
/* Flags is reserved field in this aq call */
- buf.generic[0].data.flags = 0;
+ buf.data.flags = 0;
/* Update HW DB */
/* Configure element node */
@@ -2131,9 +2172,9 @@ static struct ice_aqc_rl_profile_info *
ice_sched_add_rl_profile(struct ice_port_info *pi,
enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
- struct ice_aqc_rl_profile_generic_elem *buf;
struct ice_aqc_rl_profile_info *rl_prof_elem;
u16 profiles_added = 0, num_profiles = 1;
+ struct ice_aqc_rl_profile_elem *buf;
enum ice_status status;
struct ice_hw *hw;
u8 profile_type;
@@ -2159,8 +2200,8 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
hw = pi->hw;
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
- if (rl_prof_elem->profile.flags == profile_type &&
- rl_prof_elem->bw == bw)
+ if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
+ profile_type && rl_prof_elem->bw == bw)
/* Return existing profile ID info */
return rl_prof_elem;
@@ -2182,8 +2223,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);
/* Create new entry in HW DB */
- buf = (struct ice_aqc_rl_profile_generic_elem *)
- &rl_prof_elem->profile;
+ buf = &rl_prof_elem->profile;
status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
&profiles_added, NULL);
if (status || profiles_added != num_profiles)
@@ -2391,7 +2431,8 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
/* Check the existing list for RL profile */
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
- if (rl_prof_elem->profile.flags == profile_type &&
+ if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
+ profile_type &&
le16_to_cpu(rl_prof_elem->profile.profile_id) ==
profile_id) {
if (rl_prof_elem->prof_id_ref)
@@ -2553,8 +2594,8 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
return 0;
return ice_sched_rm_rl_profile(pi, layer_num,
- rl_prof_info->profile.flags,
- old_id);
+ rl_prof_info->profile.flags &
+ ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index f0593cfb6521..0e55ae0d446f 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -56,7 +56,7 @@ struct ice_sched_agg_info {
/* FW AQ command calls */
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_get_elem *buf, u16 buf_size,
+ struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index ff7d16ac693e..c3a6c41385ee 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -29,25 +29,17 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
- (sizeof(struct ice_aqc_sw_rules_elem) - \
- sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
- sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
+ (DUMMY_ETH_HDR_LEN * \
+ sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
- (sizeof(struct ice_aqc_sw_rules_elem) - \
- sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
- sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
- (sizeof(struct ice_aqc_sw_rules_elem) - \
- sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
- sizeof(struct ice_sw_rule_lg_act) - \
- sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
- ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
+ ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
- (sizeof(struct ice_aqc_sw_rules_elem) - \
- sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
- sizeof(struct ice_sw_rule_vsi_list) - \
- sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
- ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
+ ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
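
The rewritten size macros compute a rule buffer length as the offset of the trailing flexible array plus n entries, instead of chaining sizeof() subtractions. A small user-space sketch of the same pattern, with hypothetical stand-in structs (it relies on the same GCC extension the driver headers use, a flexible-array struct nested inside another struct), not part of the patch:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_vsi_list {
        uint16_t index;
        uint16_t number_vsi;
        uint16_t vsi[];                 /* flexible array of VSI IDs */
};

struct demo_rules_elem {
        uint16_t type;
        struct demo_vsi_list vsi_list;  /* flexible array at the tail */
};

#define DEMO_RULE_VSI_LIST_SIZE(n) \
        (offsetof(struct demo_rules_elem, vsi_list.vsi) + \
         (n) * sizeof(((struct demo_vsi_list *)0)->vsi[0]))

int main(void)
{
        /* header bytes plus room for 4 VSI entries: prints 14 here */
        printf("%zu\n", DEMO_RULE_VSI_LIST_SIZE(4));
        return 0;
}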
/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
@@ -87,7 +79,7 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
* @num_elems: pointer to number of elements
* @cd: pointer to command details structure or NULL
*
- * Get switch configuration (0x0200) to be placed in 'buff'.
+ * Get switch configuration (0x0200) to be placed in buf.
* This admin command returns information such as initial VSI/port number
* and switch ID it belongs to.
*
@@ -104,13 +96,13 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
* parsing the response buffer.
*/
static enum ice_status
-ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
+ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
u16 buf_size, u16 *req_desc, u16 *num_elems,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sw_cfg *cmd;
- enum ice_status status;
struct ice_aq_desc desc;
+ enum ice_status status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
cmd = &desc.params.get_sw_conf;
@@ -449,7 +441,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_status status;
u16 buf_len;
- buf_len = sizeof(*sw_buf);
+ buf_len = struct_size(sw_buf, elem, 1);
sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
@@ -503,6 +495,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
+ enum ice_status status;
if (opc != ice_aqc_opc_add_sw_rules &&
opc != ice_aqc_opc_update_sw_rules &&
@@ -514,7 +507,12 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
desc.params.sw_rules.num_rules_fltr_entry_index =
cpu_to_le16(num_rules);
- return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
+ status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
+ if (opc != ice_aqc_opc_add_sw_rules &&
+ hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ status = ICE_ERR_DOES_NOT_EXIST;
+
+ return status;
}
/* ice_init_port_info - Initialize port_info with switch configuration data
@@ -550,7 +548,7 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
*/
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
- struct ice_aqc_get_sw_cfg_resp *rbuf;
+ struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
enum ice_status status;
u16 req_desc = 0;
u16 num_elems;
@@ -568,19 +566,19 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
* writing a non-zero value in req_desc
*/
do {
+ struct ice_aqc_get_sw_cfg_resp_elem *ele;
+
status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
&req_desc, &num_elems, NULL);
if (status)
break;
- for (i = 0; i < num_elems; i++) {
- struct ice_aqc_get_sw_cfg_resp_elem *ele;
+ for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
u16 pf_vf_num, swid, vsi_port_num;
bool is_vf = false;
u8 res_type;
- ele = rbuf[i].elements;
vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
@@ -856,8 +854,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
m_ent->fltr_info.fwd_id.hw_vsi_id;
act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
- act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
- ICE_LG_ACT_VSI_LIST_ID_M;
+ act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
if (m_ent->vsi_count > 1)
act |= ICE_LG_ACT_VSI_LIST;
lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);
@@ -2037,7 +2034,8 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
return ICE_ERR_NO_MEMORY;
@@ -2691,7 +2689,7 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 buf_len;
/* Allocate resource */
- buf_len = sizeof(*buf);
+ buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
@@ -2729,7 +2727,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 buf_len;
/* Free resource */
- buf_len = sizeof(*buf);
+ buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index abdb137c8bb7..9d0d6b0025cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -509,8 +509,8 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
return 0;
}
-static unsigned int ice_rx_frame_truesize(struct ice_ring *rx_ring,
- unsigned int size)
+static unsigned int
+ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
{
unsigned int truesize;
@@ -631,10 +631,8 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
dma_addr_t dma;
/* since we are recycling buffers we should seldom need to alloc */
- if (likely(page)) {
- rx_ring->rx_stats.page_reuse_count++;
+ if (likely(page))
return true;
- }
/* alloc new page for storage */
page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
@@ -1033,7 +1031,6 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
if (ice_can_reuse_rx_page(rx_buf)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
- rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
@@ -1254,12 +1251,12 @@ construct_skb:
* @itr: ITR value to update
*
* Calculate how big of an increment should be applied to the ITR value passed
- * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
+ * in based on wmem_default, SKB overhead, ethernet overhead, and the current
* link speed.
*
* The following is a calculation derived from:
* wmem_default / (size + overhead) = desired_pkts_per_int
- * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
+ * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
* (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
*
* Assuming wmem_default is 212992 and overhead is 640 bytes per
@@ -2294,10 +2291,30 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
- stale = &skb_shinfo(skb)->frags[0];
- for (;;) {
+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
sum += skb_frag_size(frag++);
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > ICE_MAX_DATA_PER_TXD) {
+ int align_pad = -(skb_frag_off(stale)) &
+ (ICE_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > ICE_MAX_DATA_PER_TXD);
+ }
+
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@@ -2305,7 +2322,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
- sum -= skb_frag_size(stale++);
+ sum -= stale_size;
}
return false;
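
The loop above charges each stale fragment against the running sum in descriptor-sized chunks rather than all at once. A standalone sketch of that per-fragment accounting follows; the MAX_* values are placeholders for the ICE_MAX_* constants in ice_txrx.h and are assumptions here, not part of the patch:

#include <stdio.h>

#define MAX_READ_REQ_SIZE       4096
#define MAX_DATA_PER_TXD        (16 * 1024 - 1)
#define MAX_DATA_PER_TXD_ALIGNED \
        (MAX_DATA_PER_TXD & ~(MAX_READ_REQ_SIZE - 1))

/* Bytes left in the last descriptor of a fragment once alignment padding
 * and full-size descriptors have been peeled off the front.
 */
static int last_desc_remainder(int frag_off, int frag_size)
{
        if (frag_size > MAX_DATA_PER_TXD) {
                /* padding up to the next read-request boundary */
                int align_pad = -frag_off & (MAX_READ_REQ_SIZE - 1);

                frag_size -= align_pad;
                do {
                        frag_size -= MAX_DATA_PER_TXD_ALIGNED;
                } while (frag_size > MAX_DATA_PER_TXD);
        }

        return frag_size;
}

int main(void)
{
        /* a 40 KB fragment starting 100 bytes into its page */
        printf("last descriptor carries %d bytes\n",
               last_desc_remainder(100, 40 * 1024));
        return 0;
}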
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index e70c4619edc3..51b4df7a59d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -193,7 +193,7 @@ struct ice_rxq_stats {
u64 non_eop_descs;
u64 alloc_page_failed;
u64 alloc_buf_failed;
- u64 page_reuse_count;
+ u64 gro_dropped; /* GRO returned dropped */
};
/* this enum matches hardware bits and is meant to be used by DYN_CTLN
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 02b12736ea80..bc2f4390b51d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -191,7 +191,12 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- napi_gro_receive(&rx_ring->q_vector->napi, skb);
+ if (napi_gro_receive(&rx_ring->q_vector->napi, skb) == GRO_DROP) {
+ /* this is tracked separately to help us debug stack drops */
+ rx_ring->rx_stats.gro_dropped++;
+ netdev_dbg(rx_ring->netdev, "Receive Queue %d: Dropped packet from GRO\n",
+ rx_ring->q_index);
+ }
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index c1ad8622e65c..4cdccfadf274 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -87,6 +87,12 @@ enum ice_fc_mode {
ICE_FC_DFLT
};
+enum ice_phy_cache_mode {
+ ICE_FC_MODE = 0,
+ ICE_SPEED_MODE,
+ ICE_FEC_MODE
+};
+
enum ice_fec_mode {
ICE_FEC_NONE = 0,
ICE_FEC_RS,
@@ -94,6 +100,14 @@ enum ice_fec_mode {
ICE_FEC_AUTO
};
+struct ice_phy_cache_mode_data {
+ union {
+ enum ice_fec_mode curr_user_fec_req;
+ enum ice_fc_mode curr_user_fc_req;
+ u16 curr_user_speed_req;
+ } data;
+};
+
enum ice_set_fc_aq_failures {
ICE_SET_FC_AQ_FAIL_NONE = 0,
ICE_SET_FC_AQ_FAIL_GET,
@@ -104,6 +118,7 @@ enum ice_set_fc_aq_failures {
/* Various MAC types */
enum ice_mac_type {
ICE_MAC_UNKNOWN = 0,
+ ICE_MAC_E810,
ICE_MAC_GENERIC,
};
@@ -160,6 +175,13 @@ struct ice_phy_info {
u64 phy_type_high;
enum ice_media_type media_type;
u8 get_link_info;
+ /* Refer to struct ice_aqc_get_link_status_data for details of the
+ * enable bits in curr_user_speed_req
+ */
+ u16 curr_user_speed_req;
+ enum ice_fec_mode curr_user_fec_req;
+ enum ice_fc_mode curr_user_fc_req;
+ struct ice_aqc_set_phy_cfg_data curr_user_phy_cfg;
};
/* protocol enumeration for filters */
@@ -222,6 +244,15 @@ struct ice_hw_common_caps {
u8 rss_table_entry_width; /* RSS Entry width in bits */
u8 dcb;
+
+ bool nvm_update_pending_nvm;
+ bool nvm_update_pending_orom;
+ bool nvm_update_pending_netlist;
+#define ICE_NVM_PENDING_NVM_IMAGE BIT(0)
+#define ICE_NVM_PENDING_OROM BIT(1)
+#define ICE_NVM_PENDING_NETLIST BIT(2)
+ bool nvm_unified_update;
+#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
};
/* Function specific capabilities */
@@ -290,7 +321,29 @@ struct ice_nvm_info {
u32 flash_size; /* Size of available flash in bytes */
u8 major_ver; /* major version of NVM package */
u8 minor_ver; /* minor version of dev starter */
- u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+};
+
+struct ice_link_default_override_tlv {
+ u8 options;
+#define ICE_LINK_OVERRIDE_OPT_M 0x3F
+#define ICE_LINK_OVERRIDE_STRICT_MODE BIT(0)
+#define ICE_LINK_OVERRIDE_EPCT_DIS BIT(1)
+#define ICE_LINK_OVERRIDE_PORT_DIS BIT(2)
+#define ICE_LINK_OVERRIDE_EN BIT(3)
+#define ICE_LINK_OVERRIDE_AUTO_LINK_DIS BIT(4)
+#define ICE_LINK_OVERRIDE_EEE_EN BIT(5)
+ u8 phy_config;
+#define ICE_LINK_OVERRIDE_PHY_CFG_S 8
+#define ICE_LINK_OVERRIDE_PHY_CFG_M (0xC3 << ICE_LINK_OVERRIDE_PHY_CFG_S)
+#define ICE_LINK_OVERRIDE_PAUSE_M 0x3
+#define ICE_LINK_OVERRIDE_LESM_EN BIT(6)
+#define ICE_LINK_OVERRIDE_AUTO_FEC_EN BIT(7)
+ u8 fec_options;
+#define ICE_LINK_OVERRIDE_FEC_OPT_M 0xFF
+ u8 rsvd1;
+ u64 phy_type_low;
+ u64 phy_type_high;
};
#define ICE_NVM_VER_LEN 32
@@ -356,7 +409,7 @@ enum ice_rl_type {
#define ICE_SCHED_DFLT_BW 0xFFFFFFFF /* unlimited */
#define ICE_SCHED_DFLT_RL_PROF_ID 0
#define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF
-#define ICE_SCHED_DFLT_BW_WT 1
+#define ICE_SCHED_DFLT_BW_WT 4
#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
@@ -444,6 +497,7 @@ struct ice_dcb_app_priority_table {
#define ICE_APP_SEL_ETHTYPE 0x1
#define ICE_APP_SEL_TCPIP 0x2
#define ICE_CEE_APP_SEL_ETHTYPE 0x0
+#define ICE_SR_LINK_DEFAULT_OVERRIDE_PTR 0x134
#define ICE_CEE_APP_SEL_TCPIP 0x1
struct ice_dcbx_cfg {
@@ -709,6 +763,7 @@ struct ice_hw_port_stats {
/* Checksum and Shadow RAM pointers */
#define ICE_SR_BOOT_CFG_PTR 0x132
+#define ICE_SR_NVM_WOL_CFG 0x19
#define ICE_NVM_OROM_VER_OFF 0x02
#define ICE_SR_PBA_BLOCK_PTR 0x16
#define ICE_SR_NVM_DEV_STARTER_VER 0x18
@@ -725,7 +780,21 @@ struct ice_hw_port_stats {
#define ICE_OROM_VER_SHIFT 24
#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
#define ICE_SR_PFA_PTR 0x40
+#define ICE_SR_1ST_NVM_BANK_PTR 0x42
+#define ICE_SR_1ST_OROM_BANK_PTR 0x44
+#define ICE_SR_NETLIST_BANK_PTR 0x46
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
+
+/* Link override related */
+#define ICE_SR_PFA_LINK_OVERRIDE_WORDS 10
+#define ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS 4
+#define ICE_SR_PFA_LINK_OVERRIDE_OFFSET 2
+#define ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET 1
+#define ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET 2
+#define ICE_FW_API_LINK_OVERRIDE_MAJ 1
+#define ICE_FW_API_LINK_OVERRIDE_MIN 5
+#define ICE_FW_API_LINK_OVERRIDE_PATCH 2
+
#define ICE_SR_WORDS_IN_1KB 512
/* Hash redirection LUT for VSI - maximum array size */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 16a2f2526ccc..71497776ac62 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -932,6 +932,8 @@ static int ice_set_per_vf_res(struct ice_pf *pf)
num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
+ } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
num_msix_per_vf = ICE_MIN_INTR_PER_VF;
} else {
@@ -1593,31 +1595,6 @@ err_unroll_intr:
}
/**
- * ice_pf_state_is_nominal - checks the PF for nominal state
- * @pf: pointer to PF to check
- *
- * Check the PF's state for a collection of bits that would indicate
- * the PF is in a state that would inhibit normal operation for
- * driver functionality.
- *
- * Returns true if PF is in a nominal state.
- * Returns false otherwise
- */
-static bool ice_pf_state_is_nominal(struct ice_pf *pf)
-{
- DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
-
- if (!pf)
- return false;
-
- bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
- if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
- return false;
-
- return true;
-}
-
-/**
* ice_pci_sriov_ena - Enable or change number of VFs
* @pf: pointer to the PF structure
* @num_vfs: number of VFs to allocate
@@ -2997,8 +2974,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->max_frame = qpi->rxq.max_pkt_size;
}
- /* VF can request to configure less than allocated queues
- * or default allocated queues. So update the VSI with new number
+ /* VF can request to configure fewer queues than allocated or the default
+ * number of allocated queues, so update the VSI with the new number
*/
vsi->num_txq = num_txq;
vsi->num_rxq = num_rxq;
@@ -4096,3 +4073,33 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
}
}
}
+
+/**
+ * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
+ * @pdev: pointer to a pci_dev structure
+ *
+ * Called when recovering from a PF FLR to restore interrupt capability to
+ * the VFs.
+ */
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
+{
+ struct pci_dev *vfdev;
+ u16 vf_id;
+ int pos;
+
+ if (!pci_num_vf(pdev))
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
+ &vf_id);
+ vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn && vfdev->physfn == pdev)
+ pci_restore_msi_state(vfdev);
+ vfdev = pci_get_device(pdev->vendor, vf_id,
+ vfdev);
+ }
+ }
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 67aa9110fdd1..0f519fba3770 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -32,6 +32,7 @@
#define ICE_MAX_RSS_QS_PER_VF 16
#define ICE_NUM_VF_MSIX_MED 17
#define ICE_NUM_VF_MSIX_SMALL 5
+#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_MAX_VF_RESET_TRIES 40
#define ICE_MAX_VF_RESET_SLEEP_MS 20
@@ -114,6 +115,7 @@ void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -146,6 +148,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
#define ice_vf_lan_overflow_event(pf, event) do {} while (0)
#define ice_print_vfs_mdd_events(pf) do {} while (0)
#define ice_print_vf_rx_mdd_event(vf) do {} while (0)
+#define ice_restore_all_vfs_msi_state(pdev) do {} while (0)
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index b6f928c9e9c9..20ac5fca68c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -206,12 +206,14 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
struct ice_aqc_add_tx_qgrp *qg_buf;
struct ice_ring *tx_ring, *rx_ring;
struct ice_q_vector *q_vector;
+ u16 size;
int err;
if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
return -EINVAL;
- qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
+ size = struct_size(qg_buf, txqs, 1);
+ qg_buf = kzalloc(size, GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
@@ -228,7 +230,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
- memset(qg_buf, 0, sizeof(*qg_buf));
+ memset(qg_buf, 0, size);
qg_buf->num_txqs = 1;
err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
if (err)
@@ -296,7 +298,6 @@ static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
}
}
-
/**
* ice_xsk_umem_disable - disable a UMEM region
* @vsi: Current VSI
@@ -592,7 +593,6 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (!size)
break;
-
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
rx_buf->xdp->data_end = rx_buf->xdp->data + size;
xsk_buff_dma_sync_for_cpu(rx_buf->xdp);
@@ -704,8 +704,6 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
- if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
- xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
return budget > 0 && work_done;
@@ -781,12 +779,8 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
if (xsk_frames)
xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
- if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) {
- if (xdp_ring->next_to_clean == xdp_ring->next_to_use)
- xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
- else
- xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
- }
+ if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
+ xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 438b42ce2cd9..a32391e82762 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -638,7 +638,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
dev_spec->sgmii_active = true;
break;
}
- /* fall through - for I2C based SGMII */
+ fallthrough; /* for I2C based SGMII */
case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
/* read media type from SFP EEPROM */
ret_val = igb_set_sfp_media_type_82575(hw);
@@ -1704,7 +1704,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
/* disable PCS autoneg and support parallel detect only */
pcs_autoneg = false;
- /* fall through */
+ fallthrough;
default:
if (hw->mac.type == e1000_82575 ||
hw->mac.type == e1000_82576) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 09f4dcb09632..fa136e6e9328 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -721,7 +721,7 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
igb_read_invm_version(hw, fw_vers);
return;
}
- /* fall through */
+ fallthrough;
case e1000_i350:
/* find combo image version */
hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index ad2125e5a7f7..8c8eb82e6272 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -659,7 +659,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
phy_data |= M88E1000_PSCR_AUTO_X_1000T;
break;
}
- /* fall through */
+ fallthrough;
case 0:
default:
phy_data |= M88E1000_PSCR_AUTO_X_MODE;
@@ -2621,7 +2621,7 @@ static s32 igb_set_master_slave_mode(struct e1000_hw *hw)
break;
case e1000_ms_auto:
phy_data &= ~CR_1000T_MS_ENABLE;
- /* fall-through */
+ fallthrough;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 0c9282e2aaec..2f015b60a995 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -642,7 +642,6 @@ enum igb_boards {
};
extern char igb_driver_name[];
-extern char igb_driver_version[];
int igb_open(struct net_device *netdev);
int igb_close(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 2cd003c5ad43..6e8231c1ddf0 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -851,7 +851,6 @@ static void igb_get_drvinfo(struct net_device *netdev,
struct igb_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
/* EEPROM image version # is reported as firmware version # for
* 82575 controllers
@@ -1783,8 +1782,8 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
memset(skb->data, 0xFF, frame_size);
frame_size /= 2;
memset(&skb->data[frame_size], 0xAA, frame_size - 1);
- memset(&skb->data[frame_size + 10], 0xBE, 1);
- memset(&skb->data[frame_size + 12], 0xAF, 1);
+ skb->data[frame_size + 10] = 0xBE;
+ skb->data[frame_size + 12] = 0xAF;
}
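
The loopback test frame built here is half 0xFF, half 0xAA, with two marker bytes that the receive path checks back. A small standalone sketch of that pattern (the verification offsets mirror what igb_check_lbtest_frame appears to test and are an assumption here, not part of the patch):

#include <stdio.h>
#include <string.h>

static void create_lbtest_frame(unsigned char *data, unsigned int frame_size)
{
        memset(data, 0xFF, frame_size);
        frame_size /= 2;
        memset(&data[frame_size], 0xAA, frame_size - 1);
        data[frame_size + 10] = 0xBE;
        data[frame_size + 12] = 0xAF;
}

static int check_lbtest_frame(const unsigned char *data, unsigned int frame_size)
{
        frame_size /= 2;
        return data[3] == 0xFF &&
               data[frame_size + 10] == 0xBE &&
               data[frame_size + 12] == 0xAF;
}

int main(void)
{
        unsigned char frame[128];

        create_lbtest_frame(frame, sizeof(frame));
        printf("frame %s\n", check_lbtest_frame(frame, sizeof(frame)) ?
               "matches" : "does not match");
        return 0;
}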
static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
@@ -2518,11 +2517,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case UDP_V4_FLOW:
if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2532,11 +2531,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
break;
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case UDP_V6_FLOW:
if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8bb3db2cbd41..4f05f6efe6af 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -38,12 +38,6 @@
#include <linux/i2c.h>
#include "igb.h"
-#define MAJ 5
-#define MIN 6
-#define BUILD 0
-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
-__stringify(BUILD) "-k"
-
enum queue_mode {
QUEUE_MODE_STRICT_PRIORITY,
QUEUE_MODE_STREAM_RESERVATION,
@@ -55,7 +49,6 @@ enum tx_queue_prio {
};
char igb_driver_name[] = "igb";
-char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
@@ -240,7 +233,6 @@ static struct pci_driver igb_driver = {
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
@@ -666,8 +658,7 @@ static int __init igb_init_module(void)
{
int ret;
- pr_info("%s - version %s\n",
- igb_driver_string, igb_driver_version);
+ pr_info("%s\n", igb_driver_string);
pr_info("%s\n", igb_copyright);
#ifdef CONFIG_IGB_DCA
@@ -720,14 +711,14 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
adapter->rx_ring[i]->reg_idx = rbase_offset +
Q_IDX_82576(i);
}
- /* Fall through */
+ fallthrough;
case e1000_82575:
case e1000_82580:
case e1000_i350:
case e1000_i354:
case e1000_i210:
case e1000_i211:
- /* Fall through */
+ fallthrough;
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -2882,7 +2873,7 @@ void igb_set_fw_version(struct igb_adapter *adapter)
fw.invm_img_type);
break;
}
- /* fall through */
+ fallthrough;
default:
/* if option is rom valid, display its version too */
if (fw.or_valid) {
@@ -3733,13 +3724,13 @@ unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
max_rss_queues = 1;
break;
}
- /* fall through */
+ fallthrough;
case e1000_82576:
if (!!adapter->vfs_allocated_count) {
max_rss_queues = 2;
break;
}
- /* fall through */
+ fallthrough;
case e1000_82580:
case e1000_i354:
default:
@@ -4878,14 +4869,14 @@ static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
/* VLAN filtering needed for VLAN prio filter */
if (adapter->netdev->features & NETIF_F_NTUPLE)
break;
- /* fall through */
+ fallthrough;
case e1000_82576:
case e1000_82580:
case e1000_i354:
/* VLAN filtering needed for pool filtering */
if (adapter->vfs_allocated_count)
break;
- /* fall through */
+ fallthrough;
default:
return 1;
}
@@ -5165,7 +5156,7 @@ bool igb_has_link(struct igb_adapter *adapter)
case e1000_media_type_copper:
if (!hw->mac.get_link_status)
return true;
- /* fall through */
+ fallthrough;
case e1000_media_type_internal_serdes:
hw->mac.ops.check_for_link(hw);
link_active = !hw->mac.get_link_status;
@@ -5825,7 +5816,7 @@ csum_failed:
switch (skb->csum_offset) {
case offsetof(struct tcphdr, check):
type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
- /* fall through */
+ fallthrough;
case offsetof(struct udphdr, check):
break;
case offsetof(struct sctphdr, checksum):
@@ -5837,7 +5828,7 @@ csum_failed:
type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
- /* fall through */
+ fallthrough;
default:
skb_checksum_help(skb);
goto csum_failed;
@@ -6224,9 +6215,18 @@ static void igb_reset_task(struct work_struct *work)
struct igb_adapter *adapter;
adapter = container_of(work, struct igb_adapter, reset_task);
+ rtnl_lock();
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IGB_DOWN, &adapter->state) ||
+ test_bit(__IGB_RESETTING, &adapter->state)) {
+ rtnl_unlock();
+ return;
+ }
+
igb_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igb_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -6715,7 +6715,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
igb_setup_dca(adapter);
break;
}
- /* Fall Through - since DCA is disabled. */
+ fallthrough; /* since DCA is disabled. */
case DCA_PROVIDER_REMOVE:
if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
/* without this a class_device is left
@@ -7168,7 +7168,7 @@ static void igb_flush_mac_table(struct igb_adapter *adapter)
for (i = 0; i < hw->mac.rar_entry_count; i++) {
adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ eth_zero_addr(adapter->mac_table[i].addr);
adapter->mac_table[i].queue = 0;
igb_rar_set_index(adapter, i);
}
@@ -7317,7 +7317,7 @@ static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
} else {
adapter->mac_table[i].state = 0;
adapter->mac_table[i].queue = 0;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ eth_zero_addr(adapter->mac_table[i].addr);
}
igb_rar_set_index(adapter, i);
@@ -9384,13 +9384,13 @@ static void igb_vmm_control(struct igb_adapter *adapter)
reg = rd32(E1000_DTXCTL);
reg |= E1000_DTXCTL_VLAN_ADDED;
wr32(E1000_DTXCTL, reg);
- /* Fall through */
+ fallthrough;
case e1000_82580:
/* enable replication vlan tag stripping */
reg = rd32(E1000_RPLOLR);
reg |= E1000_RPLOLR_STRVLAN;
wr32(E1000_RPLOLR, reg);
- /* Fall through */
+ fallthrough;
case e1000_i350:
/* none of the above registers are supported by i350 */
break;
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index c39e921757ba..490368d3d03c 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -1053,7 +1053,7 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
- /* fall through */
+ fallthrough;
default:
config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 9217d150e286..f4835eb62fee 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -170,8 +170,6 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
struct igbvf_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, igbvf_driver_version,
- sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index eee26a3be90b..975eb47ee04d 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -281,7 +281,6 @@ enum igbvf_state_t {
};
extern char igbvf_driver_name[];
-extern const char igbvf_driver_version[];
void igbvf_check_options(struct igbvf_adapter *);
void igbvf_set_ethtool_ops(struct net_device *);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 5b1800c3ba82..19269f5d52bc 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -24,9 +24,7 @@
#include "igbvf.h"
-#define DRV_VERSION "2.4.0-k"
char igbvf_driver_name[] = "igbvf";
-const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
"Intel(R) Gigabit Virtual Function Network Driver";
static const char igbvf_copyright[] =
@@ -2093,7 +2091,7 @@ csum_failed:
switch (skb->csum_offset) {
case offsetof(struct tcphdr, check):
type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
- /* fall through */
+ fallthrough;
case offsetof(struct udphdr, check):
break;
case offsetof(struct sctphdr, checksum):
@@ -2105,7 +2103,7 @@ csum_failed:
type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
- /* fall through */
+ fallthrough;
default:
skb_checksum_help(skb);
goto csum_failed;
@@ -2459,13 +2457,10 @@ static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
-static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
+static int igbvf_suspend(struct device *dev_d)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev_d);
struct igbvf_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
netif_device_detach(netdev);
@@ -2475,31 +2470,16 @@ static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
igbvf_free_irq(adapter);
}
-#ifdef CONFIG_PM
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-#endif
-
- pci_disable_device(pdev);
-
return 0;
}
-#ifdef CONFIG_PM
-static int igbvf_resume(struct pci_dev *pdev)
+static int __maybe_unused igbvf_resume(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *netdev = pci_get_drvdata(pdev);
struct igbvf_adapter *adapter = netdev_priv(netdev);
u32 err;
- pci_restore_state(pdev);
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
- return err;
- }
-
pci_set_master(pdev);
if (netif_running(netdev)) {
@@ -2517,11 +2497,10 @@ static int igbvf_resume(struct pci_dev *pdev)
return 0;
}
-#endif
static void igbvf_shutdown(struct pci_dev *pdev)
{
- igbvf_suspend(pdev, PMSG_SUSPEND);
+ igbvf_suspend(&pdev->dev);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2962,17 +2941,15 @@ static const struct pci_device_id igbvf_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
+static SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume);
+
/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
.name = igbvf_driver_name,
.id_table = igbvf_pci_tbl,
.probe = igbvf_probe,
.remove = igbvf_remove,
-#ifdef CONFIG_PM
- /* Power Management Hooks */
- .suspend = igbvf_suspend,
- .resume = igbvf_resume,
-#endif
+ .driver.pm = &igbvf_pm_ops,
.shutdown = igbvf_shutdown,
.err_handler = &igbvf_err_handler
};
@@ -2987,7 +2964,7 @@ static int __init igbvf_init_module(void)
{
int ret;
- pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
+ pr_info("%s\n", igbvf_driver_string);
pr_info("%s\n", igbvf_copyright);
ret = pci_register_driver(&igbvf_driver);
@@ -3011,6 +2988,5 @@ module_exit(igbvf_exit_module);
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
/* netdev.c */
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 5dbc5a156626..3070dfdb7eb4 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -117,6 +117,9 @@ struct igc_ring {
struct igc_adapter {
struct net_device *netdev;
+ struct ethtool_eee eee;
+ u16 eee_advert;
+
unsigned long state;
unsigned int flags;
unsigned int num_q_vectors;
@@ -207,8 +210,6 @@ struct igc_adapter {
struct sk_buff *ptp_tx_skb;
struct hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
- unsigned long last_rx_ptp_check;
- unsigned long last_rx_timestamp;
unsigned int ptp_flags;
/* System time value lock */
spinlock_t tmreg_lock;
@@ -239,7 +240,6 @@ void igc_rings_dump(struct igc_adapter *adapter);
void igc_regs_dump(struct igc_adapter *adapter);
extern char igc_driver_name[];
-extern char igc_driver_version[];
#define IGC_REGS_LEN 740
@@ -256,6 +256,7 @@ extern char igc_driver_version[];
#define IGC_FLAG_MEDIA_RESET BIT(10)
#define IGC_FLAG_MAS_ENABLE BIT(12)
#define IGC_FLAG_HAS_MSIX BIT(13)
+#define IGC_FLAG_EEE BIT(14)
#define IGC_FLAG_VLAN_PROMISC BIT(15)
#define IGC_FLAG_RX_LEGACY BIT(16)
#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
@@ -546,7 +547,6 @@ void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
void igc_ptp_suspend(struct igc_adapter *adapter);
void igc_ptp_stop(struct igc_adapter *adapter);
-void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector, struct sk_buff *skb);
void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
struct sk_buff *skb);
int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 186deb1d9375..f1f464967f87 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -323,7 +323,6 @@
/* Advanced Receive Descriptor bit definitions */
#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
-#define IGC_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
#define IGC_RXDEXT_STATERR_CE 0x01000000
#define IGC_RXDEXT_STATERR_SE 0x02000000
@@ -384,7 +383,6 @@
#define IGC_FTQF_MASK_PROTO_BP 0x10000000
/* Time Sync Receive Control bit definitions */
-#define IGC_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
#define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
#define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00
#define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02
@@ -511,4 +509,41 @@
/* Maximum size of the MTA register table in all supported adapters */
#define MAX_MTA_REG 128
+/* EEE defines */
+#define IGC_IPCNFG_EEE_2_5G_AN 0x00000010 /* IPCNFG EEE Ena 2.5G AN */
+#define IGC_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */
+#define IGC_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */
+#define IGC_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
+#define IGC_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */
+#define IGC_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */
+#define IGC_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */
+#define IGC_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */
+
+/* LTR defines */
+#define IGC_LTRC_EEEMS_EN 0x00000020 /* Enable EEE LTR max send */
+#define IGC_RXPBS_SIZE_I225_MASK 0x0000003F /* Rx packet buffer size */
+#define IGC_TW_SYSTEM_1000_MASK 0x000000FF
+/* Minimum time for 100BASE-T during which no data will be transmitted after
+ * moving out of the EEE LPI Tx state
+ */
+#define IGC_TW_SYSTEM_100_MASK 0x0000FF00
+#define IGC_TW_SYSTEM_100_SHIFT 8
+#define IGC_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
+#define IGC_DMACR_DMACTHR_MASK 0x00FF0000
+#define IGC_DMACR_DMACTHR_SHIFT 16
+/* Reg val to set scale to 1024 nsec */
+#define IGC_LTRMINV_SCALE_1024 2
+/* Reg val to set scale to 32768 nsec */
+#define IGC_LTRMINV_SCALE_32768 3
+/* Reg val to set scale to 1024 nsec */
+#define IGC_LTRMAXV_SCALE_1024 2
+/* Reg val to set scale to 32768 nsec */
+#define IGC_LTRMAXV_SCALE_32768 3
+#define IGC_LTRMINV_LTRV_MASK 0x000003FF /* LTR minimum value */
+#define IGC_LTRMAXV_LTRV_MASK 0x000003FF /* LTR maximum value */
+#define IGC_LTRMINV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */
+#define IGC_LTRMINV_SCALE_SHIFT 10
+#define IGC_LTRMAXV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */
+#define IGC_LTRMAXV_SCALE_SHIFT 10
+
#endif /* _IGC_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index a938ec8db681..44410c2265d6 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -4,6 +4,7 @@
/* ethtool support for igc */
#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
+#include <linux/mdio.h>
#include "igc.h"
#include "igc_diag.h"
@@ -130,7 +131,6 @@ static void igc_ethtool_get_drvinfo(struct net_device *netdev,
struct igc_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, igc_driver_version, sizeof(drvinfo->version));
/* add fw_version here */
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
@@ -1015,37 +1015,29 @@ static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case UDP_V4_FLOW:
if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case SCTP_V4_FLOW:
- /* Fall through */
case AH_ESP_V4_FLOW:
- /* Fall through */
case AH_V4_FLOW:
- /* Fall through */
case ESP_V4_FLOW:
- /* Fall through */
case IPV4_FLOW:
cmd->data |= RXH_IP_SRC | RXH_IP_DST;
break;
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case UDP_V6_FLOW:
if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ fallthrough;
case SCTP_V6_FLOW:
- /* Fall through */
case AH_ESP_V6_FLOW:
- /* Fall through */
case AH_V6_FLOW:
- /* Fall through */
case ESP_V6_FLOW:
- /* Fall through */
case IPV6_FLOW:
cmd->data |= RXH_IP_SRC | RXH_IP_DST;
break;
@@ -1549,6 +1541,98 @@ static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags)
return 0;
}
+static int igc_ethtool_get_eee(struct net_device *netdev,
+ struct ethtool_eee *edata)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 eeer;
+
+ if (hw->dev_spec._base.eee_enable)
+ edata->advertised =
+ mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+
+ *edata = adapter->eee;
+ edata->supported = SUPPORTED_Autoneg;
+
+ eeer = rd32(IGC_EEER);
+
+ /* EEE status on negotiated link */
+ if (eeer & IGC_EEER_EEE_NEG)
+ edata->eee_active = true;
+
+ if (eeer & IGC_EEER_TX_LPI_EN)
+ edata->tx_lpi_enabled = true;
+
+ edata->eee_enabled = hw->dev_spec._base.eee_enable;
+
+ edata->advertised = SUPPORTED_Autoneg;
+ edata->lp_advertised = SUPPORTED_Autoneg;
+
+ /* Report correct negotiated EEE status for devices that
+ * wrongly report EEE at half-duplex
+ */
+ if (adapter->link_duplex == HALF_DUPLEX) {
+ edata->eee_enabled = false;
+ edata->eee_active = false;
+ edata->tx_lpi_enabled = false;
+ edata->advertised &= ~edata->advertised;
+ }
+
+ return 0;
+}
+
+static int igc_ethtool_set_eee(struct net_device *netdev,
+ struct ethtool_eee *edata)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ struct ethtool_eee eee_curr;
+ s32 ret_val;
+
+ memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+
+ ret_val = igc_ethtool_get_eee(netdev, &eee_curr);
+ if (ret_val) {
+ netdev_err(netdev,
+ "Problem setting EEE advertisement options\n");
+ return -EINVAL;
+ }
+
+ if (eee_curr.eee_enabled) {
+ if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+ netdev_err(netdev,
+ "Setting EEE tx-lpi is not supported\n");
+ return -EINVAL;
+ }
+
+ /* Tx LPI timer is not implemented currently */
+ if (edata->tx_lpi_timer) {
+ netdev_err(netdev,
+ "Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+ } else if (!edata->eee_enabled) {
+ netdev_err(netdev,
+ "Setting EEE options is not supported with EEE disabled\n");
+ return -EINVAL;
+ }
+
+ adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ if (hw->dev_spec._base.eee_enable != edata->eee_enabled) {
+ hw->dev_spec._base.eee_enable = edata->eee_enabled;
+ adapter->flags |= IGC_FLAG_EEE;
+
+ /* reset link */
+ if (netif_running(netdev))
+ igc_reinit_locked(adapter);
+ else
+ igc_reset(adapter);
+ }
+
+ return 0;
+}
+
static int igc_ethtool_begin(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -1830,6 +1914,8 @@ static const struct ethtool_ops igc_ethtool_ops = {
.set_channels = igc_ethtool_set_channels,
.get_priv_flags = igc_ethtool_get_priv_flags,
.set_priv_flags = igc_ethtool_set_priv_flags,
+ .get_eee = igc_ethtool_get_eee,
+ .set_eee = igc_ethtool_set_eee,
.begin = igc_ethtool_begin,
.complete = igc_ethtool_complete,
.get_link_ksettings = igc_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index af34ae310327..b9fe51b91c47 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -82,13 +82,7 @@ struct igc_mac_info {
enum igc_mac_type type;
- u32 collision_delta;
- u32 ledctl_default;
- u32 ledctl_mode1;
- u32 ledctl_mode2;
u32 mc_filter_type;
- u32 tx_packet_delta;
- u32 txcw;
u16 mta_reg_count;
u16 uta_reg_count;
@@ -98,8 +92,6 @@ struct igc_mac_info {
u8 forced_speed_duplex;
- bool adaptive_ifs;
- bool has_fwsm;
bool asf_firmware_present;
bool arc_subsystem_valid;
@@ -191,6 +183,7 @@ struct igc_fc_info {
struct igc_dev_spec_base {
bool clear_semaphore_once;
+ bool eee_enable;
};
struct igc_hw {
@@ -275,21 +268,9 @@ struct igc_hw_stats {
u64 tsctc;
u64 tsctfc;
u64 iac;
- u64 icrxptc;
- u64 icrxatc;
- u64 ictxptc;
- u64 ictxatc;
- u64 ictxqec;
- u64 ictxqmtc;
- u64 icrxdmtc;
- u64 icrxoc;
- u64 cbtmpc;
u64 htdpmc;
- u64 cbrdpc;
- u64 cbrmpc;
u64 rpthc;
u64 hgptc;
- u64 htcbdpc;
u64 hgorc;
u64 hgotc;
u64 lenerrs;
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index c25f555aaf82..8b67d9b49a83 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -488,3 +488,159 @@ s32 igc_init_nvm_params_i225(struct igc_hw *hw)
}
return 0;
}
+
+/**
+ * igc_set_eee_i225 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @adv2p5G: boolean flag enabling 2.5G EEE advertisement
+ * @adv1G: boolean flag enabling 1G EEE advertisement
+ * @adv100M: boolean flag enabling 100M EEE advertisement
+ *
+ * Enable/disable EEE based on setting in dev_spec structure.
+ **/
+s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
+ bool adv100M)
+{
+ u32 ipcnfg, eeer;
+
+ ipcnfg = rd32(IGC_IPCNFG);
+ eeer = rd32(IGC_EEER);
+
+ /* enable or disable per user setting */
+ if (hw->dev_spec._base.eee_enable) {
+ u32 eee_su = rd32(IGC_EEE_SU);
+
+ if (adv100M)
+ ipcnfg |= IGC_IPCNFG_EEE_100M_AN;
+ else
+ ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN;
+
+ if (adv1G)
+ ipcnfg |= IGC_IPCNFG_EEE_1G_AN;
+ else
+ ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN;
+
+ if (adv2p5G)
+ ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN;
+ else
+ ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN;
+
+ eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
+ IGC_EEER_LPI_FC);
+
+ /* This bit should not be set in normal operation. */
+ if (eee_su & IGC_EEE_SU_LPI_CLK_STP)
+ hw_dbg("LPI Clock Stop Bit should not be set!\n");
+ } else {
+ ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN |
+ IGC_IPCNFG_EEE_100M_AN);
+ eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
+ IGC_EEER_LPI_FC);
+ }
+ wr32(IGC_IPCNFG, ipcnfg);
+ wr32(IGC_EEER, eeer);
+ rd32(IGC_IPCNFG);
+ rd32(IGC_EEER);
+
+ return IGC_SUCCESS;
+}
+
+/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds
+ * @hw: pointer to the HW structure
+ * @link: bool indicating link status
+ *
+ * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC
+ * settings, otherwise specify that there is no LTR requirement.
+ */
+s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
+{
+ u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max;
+ u16 speed, duplex;
+ s32 size;
+
+ /* If we do not have link, LTR thresholds are zero. */
+ if (link) {
+ hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
+
+ /* Check if EEE is enabled on the copper interface and the
+ * link speed is not 10 Mbps.
+ */
+ if (hw->dev_spec._base.eee_enable &&
+ speed != SPEED_10) {
+ /* EEE enabled, so send LTRMAX threshold. */
+ ltrc = rd32(IGC_LTRC) |
+ IGC_LTRC_EEEMS_EN;
+ wr32(IGC_LTRC, ltrc);
+
+ /* Calculate tw_system (nsec). */
+ if (speed == SPEED_100) {
+ tw_system = ((rd32(IGC_EEE_SU) &
+ IGC_TW_SYSTEM_100_MASK) >>
+ IGC_TW_SYSTEM_100_SHIFT) * 500;
+ } else {
+ tw_system = (rd32(IGC_EEE_SU) &
+ IGC_TW_SYSTEM_1000_MASK) * 500;
+ }
+ } else {
+ tw_system = 0;
+ }
+
+ /* Get the Rx packet buffer size. */
+ size = rd32(IGC_RXPBS) &
+ IGC_RXPBS_SIZE_I225_MASK;
+
+ /* Calculations vary based on DMAC settings. */
+ if (rd32(IGC_DMACR) & IGC_DMACR_DMAC_EN) {
+ size -= (rd32(IGC_DMACR) &
+ IGC_DMACR_DMACTHR_MASK) >>
+ IGC_DMACR_DMACTHR_SHIFT;
+ /* Convert size to bits. */
+ size *= 1024 * 8;
+ } else {
+ /* Convert size to bytes and then to bits. */
+ size *= 1024;
+ size *= 8;
+ }
+
+ if (size < 0) {
+ hw_dbg("Invalid effective Rx buffer size %d\n",
+ size);
+ return -IGC_ERR_CONFIG;
+ }
+
+ /* Calculate the thresholds. Since speed is in Mbps, simplify
+ * the calculation by multiplying size/speed by 1000 for result
+ * to be in nsec before dividing by the scale in nsec. Set the
+ * scale such that the LTR threshold fits in the register.
+ */
+ ltr_min = (1000 * size) / speed;
+ ltr_max = ltr_min + tw_system;
+ scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 :
+ IGC_LTRMINV_SCALE_32768;
+ scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 :
+ IGC_LTRMAXV_SCALE_32768;
+ ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768;
+ ltr_min -= 1;
+ ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768;
+ ltr_max -= 1;
+
+ /* Only write the LTR thresholds if they differ from before. */
+ ltrv = rd32(IGC_LTRMINV);
+ if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) {
+ ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min |
+ (scale_min << IGC_LTRMINV_SCALE_SHIFT);
+ wr32(IGC_LTRMINV, ltrv);
+ }
+
+ ltrv = rd32(IGC_LTRMAXV);
+ if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
+ ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
+ (scale_max << IGC_LTRMAXV_SCALE_SHIFT);
+ wr32(IGC_LTRMAXV, ltrv);
+ }
+ }
+
+ return IGC_SUCCESS;
+}
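
igc_set_ltr_i225 above sizes the LTR thresholds from the Rx packet buffer, the link speed, and the EEE exit latency. A worked example of that arithmetic with hypothetical numbers (34 KB buffer, 1000 Mbps, 2000 ns tw_system), collapsed to nanosecond units rather than the register scale encodings; a sketch only, not part of the patch:

#include <stdio.h>

int main(void)
{
        int size = 34 * 1024 * 8;       /* Rx buffer in bits (34 KB, no DMAC) */
        int speed = 1000;               /* link speed, Mbps */
        int tw_system = 2000;           /* LPI exit time, ns */

        int ltr_min = (1000 * size) / speed;    /* ns to drain the buffer */
        int ltr_max = ltr_min + tw_system;

        /* pick 1024 ns units while the value fits, else 32768 ns units */
        int scale_min = (ltr_min / 1024) < 1024 ? 1024 : 32768;
        int scale_max = (ltr_max / 1024) < 1024 ? 1024 : 32768;

        printf("LTRMINV: %d units of %d ns\n", ltr_min / scale_min - 1, scale_min);
        printf("LTRMAXV: %d units of %d ns\n", ltr_max / scale_max - 1, scale_max);
        return 0;
}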
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.h b/drivers/net/ethernet/intel/igc/igc_i225.h
index 7b66e1f9c0e6..dae47e4f16b0 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.h
+++ b/drivers/net/ethernet/intel/igc/igc_i225.h
@@ -9,5 +9,8 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask);
s32 igc_init_nvm_params_i225(struct igc_hw *hw);
bool igc_get_flash_presence_i225(struct igc_hw *hw);
+s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
+ bool adv100M);
+s32 igc_set_ltr_i225(struct igc_hw *hw, bool link);
#endif
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index 410aeb01de5c..09cd0ec7ee87 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -289,25 +289,18 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw)
rd32(IGC_TNCRS);
rd32(IGC_HTDPMC);
rd32(IGC_TSCTC);
- rd32(IGC_TSCTFC);
rd32(IGC_MGTPRC);
rd32(IGC_MGTPDC);
rd32(IGC_MGTPTC);
rd32(IGC_IAC);
- rd32(IGC_ICRXOC);
-
- rd32(IGC_ICRXPTC);
- rd32(IGC_ICRXATC);
- rd32(IGC_ICTXPTC);
- rd32(IGC_ICTXATC);
- rd32(IGC_ICTXQEC);
- rd32(IGC_ICTXQMTC);
- rd32(IGC_ICRXDMTC);
rd32(IGC_RPTHC);
+ rd32(IGC_TLPIC);
+ rd32(IGC_RLPIC);
rd32(IGC_HGPTC);
+ rd32(IGC_RXDMTC);
rd32(IGC_HGORCL);
rd32(IGC_HGORCH);
rd32(IGC_HGOTCL);
@@ -362,8 +355,8 @@ void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index)
s32 igc_check_for_copper_link(struct igc_hw *hw)
{
struct igc_mac_info *mac = &hw->mac;
+ bool link = false;
s32 ret_val;
- bool link;
/* We only want to go out to the PHY registers to see if Auto-Neg
* has completed and/or if our link status has changed. The
@@ -417,6 +410,11 @@ s32 igc_check_for_copper_link(struct igc_hw *hw)
hw_dbg("Error configuring flow control\n");
out:
+ /* Now that we are aware of our link settings, we can set the LTR
+ * thresholds.
+ */
+ ret_val = igc_set_ltr_i225(hw, link);
+
return ret_val;
}
@@ -462,10 +460,8 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw)
* so we had to force link. In this case, we need to force the
* configuration of the MAC to match the "fc" parameter.
*/
- if (mac->autoneg_failed) {
- if (hw->phy.media_type == igc_media_type_copper)
- ret_val = igc_force_mac_fc(hw);
- }
+ if (mac->autoneg_failed)
+ ret_val = igc_force_mac_fc(hw);
if (ret_val) {
hw_dbg("Error forcing flow control settings\n");
@@ -477,7 +473,7 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw)
* has completed, and if so, how the PHY and link partner has
* flow control configured.
*/
- if (hw->phy.media_type == igc_media_type_copper && mac->autoneg) {
+ if (mac->autoneg) {
/* Read the MII Status Register and check to see if AutoNeg
* has completed. We read this twice because this reg has
* some "sticky" (latched) bits.
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 6919c50e449a..7a6f2a0d413f 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -17,7 +17,6 @@
#include "igc_hw.h"
#include "igc_tsn.h"
-#define DRV_VERSION "0.0.1-k"
#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver"
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
@@ -27,12 +26,10 @@ static int debug = -1;
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
char igc_driver_name[] = "igc";
-char igc_driver_version[] = DRV_VERSION;
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
"Copyright(c) 2018 Intel Corporation.";
@@ -64,16 +61,6 @@ enum latency_range {
latency_invalid = 255
};
-/**
- * igc_power_down_link - Power down the phy/serdes link
- * @adapter: address of board private structure
- */
-static void igc_power_down_link(struct igc_adapter *adapter)
-{
- if (adapter->hw.phy.media_type == igc_media_type_copper)
- igc_power_down_phy_copper_base(&adapter->hw);
-}
-
void igc_reset(struct igc_adapter *adapter)
{
struct net_device *dev = adapter->netdev;
@@ -105,8 +92,11 @@ void igc_reset(struct igc_adapter *adapter)
if (hw->mac.ops.init_hw(hw))
netdev_err(dev, "Error on hardware initialization\n");
+ /* Re-establish EEE setting */
+ igc_set_eee_i225(hw, true, true, true);
+
if (!netif_running(adapter->netdev))
- igc_power_down_link(adapter);
+ igc_power_down_phy_copper_base(&adapter->hw);
/* Re-enable PTP, where applicable. */
igc_ptp_reset(adapter);
@@ -125,8 +115,7 @@ static void igc_power_up_link(struct igc_adapter *adapter)
{
igc_reset_phy(&adapter->hw);
- if (adapter->hw.phy.media_type == igc_media_type_copper)
- igc_power_up_phy_copper(&adapter->hw);
+ igc_power_up_phy_copper(&adapter->hw);
igc_setup_link(&adapter->hw);
}
@@ -980,7 +969,7 @@ csum_failed:
switch (skb->csum_offset) {
case offsetof(struct tcphdr, check):
type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
- /* fall through */
+ fallthrough;
case offsetof(struct udphdr, check):
break;
case offsetof(struct sctphdr, checksum):
@@ -992,7 +981,7 @@ csum_failed:
type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
break;
}
- /* fall through */
+ fallthrough;
default:
skb_checksum_help(skb);
goto csum_failed;
@@ -1479,9 +1468,9 @@ static inline void igc_rx_hash(struct igc_ring *ring,
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
*
- * This function checks the ring, descriptor, and packet information in
- * order to populate the hash, checksum, VLAN, timestamp, protocol, and
- * other fields within the skb.
+ * This function checks the ring, descriptor, and packet information in order
+ * to populate the hash, checksum, VLAN, protocol, and other fields within the
+ * skb.
*/
static void igc_process_skb_fields(struct igc_ring *rx_ring,
union igc_adv_rx_desc *rx_desc,
@@ -1491,10 +1480,6 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
igc_rx_checksum(rx_ring, rx_desc, skb);
- if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TS) &&
- !igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))
- igc_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
-
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -1975,7 +1960,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
/* probably a little skewed due to removing CRC */
total_bytes += skb->len;
- /* populate checksum, timestamp, VLAN, and protocol */
+ /* populate checksum, VLAN, and protocol */
igc_process_skb_fields(rx_ring, rx_desc, skb);
napi_gro_receive(&q_vector->napi, skb);
@@ -3284,7 +3269,6 @@ static void igc_cache_ring_register(struct igc_adapter *adapter)
switch (adapter->hw.mac.type) {
case igc_i225:
- /* Fall through */
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = i;
@@ -3744,17 +3728,8 @@ void igc_update_stats(struct igc_adapter *adapter)
adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
adapter->stats.tsctc += rd32(IGC_TSCTC);
- adapter->stats.tsctfc += rd32(IGC_TSCTFC);
adapter->stats.iac += rd32(IGC_IAC);
- adapter->stats.icrxoc += rd32(IGC_ICRXOC);
- adapter->stats.icrxptc += rd32(IGC_ICRXPTC);
- adapter->stats.icrxatc += rd32(IGC_ICRXATC);
- adapter->stats.ictxptc += rd32(IGC_ICTXPTC);
- adapter->stats.ictxatc += rd32(IGC_ICTXATC);
- adapter->stats.ictxqec += rd32(IGC_ICTXQEC);
- adapter->stats.ictxqmtc += rd32(IGC_ICTXQMTC);
- adapter->stats.icrxdmtc += rd32(IGC_ICRXDMTC);
/* Fill out the OS statistics structure */
net_stats->multicast = adapter->stats.mprc;
@@ -4255,6 +4230,15 @@ static void igc_watchdog_task(struct work_struct *work)
(ctrl & IGC_CTRL_RFCE) ? "RX" :
(ctrl & IGC_CTRL_TFCE) ? "TX" : "None");
+ /* disable EEE if enabled */
+ if ((adapter->flags & IGC_FLAG_EEE) &&
+ adapter->link_duplex == HALF_DUPLEX) {
+ netdev_info(netdev,
+ "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
+ adapter->hw.dev_spec._base.eee_enable = false;
+ adapter->flags &= ~IGC_FLAG_EEE;
+ }
+
/* check if SmartSpeed worked */
igc_check_downshift(hw);
if (phy->speed_downgraded)
@@ -4611,7 +4595,7 @@ err_set_queues:
igc_free_irq(adapter);
err_req_irq:
igc_release_hw_control(adapter);
- igc_power_down_link(adapter);
+ igc_power_down_phy_copper_base(&adapter->hw);
igc_free_all_rx_resources(adapter);
err_setup_rx:
igc_free_all_tx_resources(adapter);
@@ -5185,6 +5169,10 @@ static int igc_probe(struct pci_dev *pdev,
netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+ /* Disable EEE for internal PHY devices */
+ hw->dev_spec._base.eee_enable = false;
+ adapter->flags &= ~IGC_FLAG_EEE;
+ igc_set_eee_i225(hw, false, false, false);
pm_runtime_put_noidle(&pdev->dev);
@@ -5305,7 +5293,7 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
wake = wufc || adapter->en_mng_pt;
if (!wake)
- igc_power_down_link(adapter);
+ igc_power_down_phy_copper_base(&adapter->hw);
else
igc_power_up_link(adapter);
@@ -5614,9 +5602,7 @@ static int __init igc_init_module(void)
{
int ret;
- pr_info("%s - version %s\n",
- igc_driver_string, igc_driver_version);
-
+ pr_info("%s\n", igc_driver_string);
pr_info("%s\n", igc_copyright);
ret = pci_register_driver(&igc_driver);
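
The EEE hooks above leave EEE disabled by default at probe time and drop it again if the link settles at half duplex, deferring re-enabling to ethtool. As a rough illustration of how an ethtool set_eee handler could feed igc_set_eee_i225(), using only the prototype added to igc_i225.h; the handler, the edata field mapping and the ADVERTISED_* bits are assumptions, not code from this patch:

/* Hypothetical sketch: map an ethtool EEE request onto the
 * (adv2p5G, adv1G, adv100M) arguments of igc_set_eee_i225().
 * Everything except the igc_set_eee_i225() prototype is assumed.
 */
static int igc_set_eee_sketch(struct igc_adapter *adapter,
			      struct ethtool_eee *edata)
{
	struct igc_hw *hw = &adapter->hw;
	bool adv2p5g, adv1g, adv100m;

	hw->dev_spec._base.eee_enable = edata->eee_enabled;

	adv2p5g = edata->eee_enabled &&
		  (edata->advertised & ADVERTISED_2500baseX_Full);
	adv1g = edata->eee_enabled &&
		(edata->advertised & ADVERTISED_1000baseT_Full);
	adv100m = edata->eee_enabled &&
		  (edata->advertised & ADVERTISED_100baseT_Full);

	return igc_set_eee_i225(hw, adv2p5g, adv1g, adv100m);
}
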
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 0d746f8588c8..e67d4655b47e 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -205,78 +205,66 @@ void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
}
-/**
- * igc_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
- * @q_vector: Pointer to interrupt specific structure
- * @skb: Buffer containing timestamp and packet
- *
- * This function is meant to retrieve a timestamp from the internal registers
- * of the adapter and store it in the skb.
- */
-void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector,
- struct sk_buff *skb)
+static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter)
{
- struct igc_adapter *adapter = q_vector->adapter;
struct igc_hw *hw = &adapter->hw;
- u64 regval;
-
- /* If this bit is set, then the RX registers contain the time
- * stamp. No other packet will be time stamped until we read
- * these registers, so read the registers to make them
- * available again. Because only one packet can be time
- * stamped at a time, we know that the register values must
- * belong to this one here and therefore we don't need to
- * compare any of the additional attributes stored for it.
- *
- * If nothing went wrong, then it should have a shared
- * tx_flags that we can turn into a skb_shared_hwtstamps.
- */
- if (!(rd32(IGC_TSYNCRXCTL) & IGC_TSYNCRXCTL_VALID))
- return;
+ u32 val;
+ int i;
- regval = rd32(IGC_RXSTMPL);
- regval |= (u64)rd32(IGC_RXSTMPH) << 32;
+ wr32(IGC_TSYNCRXCTL, 0);
- igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ val = rd32(IGC_SRRCTL(i));
+ val &= ~IGC_SRRCTL_TIMESTAMP;
+ wr32(IGC_SRRCTL(i), val);
+ }
- /* Update the last_rx_timestamp timer in order to enable watchdog check
- * for error case of latched timestamp on a dropped packet.
- */
- adapter->last_rx_timestamp = jiffies;
+ val = rd32(IGC_RXPBS);
+ val &= ~IGC_RXPBS_CFG_TS_EN;
+ wr32(IGC_RXPBS, val);
}
-/**
- * igc_ptp_enable_tstamp_rxqueue - Enable RX timestamp for a queue
- * @rx_ring: Pointer to RX queue
- * @timer: Index for timer
- *
- * This function enables RX timestamping for a queue, and selects
- * which 1588 timer will provide the timestamp.
- */
-static void igc_ptp_enable_tstamp_rxqueue(struct igc_adapter *adapter,
- struct igc_ring *rx_ring, u8 timer)
+static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
- int reg_idx = rx_ring->reg_idx;
- u32 srrctl = rd32(IGC_SRRCTL(reg_idx));
+ u32 val;
+ int i;
+
+ val = rd32(IGC_RXPBS);
+ val |= IGC_RXPBS_CFG_TS_EN;
+ wr32(IGC_RXPBS, val);
- srrctl |= IGC_SRRCTL_TIMESTAMP;
- srrctl |= IGC_SRRCTL_TIMER1SEL(timer);
- srrctl |= IGC_SRRCTL_TIMER0SEL(timer);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ val = rd32(IGC_SRRCTL(i));
+ /* FIXME: For now, only support retrieving RX timestamps from
+ * timer 0.
+ */
+ val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) |
+ IGC_SRRCTL_TIMESTAMP;
+ wr32(IGC_SRRCTL(i), val);
+ }
- wr32(IGC_SRRCTL(reg_idx), srrctl);
+ val = IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_ALL |
+ IGC_TSYNCRXCTL_RXSYNSIG;
+ wr32(IGC_TSYNCRXCTL, val);
}
-static void igc_ptp_enable_tstamp_all_rxqueues(struct igc_adapter *adapter,
- u8 timer)
+static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter)
{
- int i;
+ struct igc_hw *hw = &adapter->hw;
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct igc_ring *ring = adapter->rx_ring[i];
+ wr32(IGC_TSYNCTXCTL, 0);
+}
- igc_ptp_enable_tstamp_rxqueue(adapter, ring, timer);
- }
+static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ wr32(IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED | IGC_TSYNCTXCTL_TXSYNSIG);
+
+ /* Read TXSTMP registers to discard any timestamp previously stored. */
+ rd32(IGC_TXSTMPL);
+ rd32(IGC_TXSTMPH);
}
/**
@@ -284,37 +272,21 @@ static void igc_ptp_enable_tstamp_all_rxqueues(struct igc_adapter *adapter,
* @adapter: networking device structure
* @config: hwtstamp configuration
*
- * Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't case any overhead
- * when no packet needs it. At most one packet in the queue may be
- * marked for time stamping, otherwise it would be impossible to tell
- * for sure to which packet the hardware time stamp belongs.
- *
- * Incoming time stamping has to be configured via the hardware
- * filters. Not all combinations are supported, in particular event
- * type has to be specified. Matching the kind of event packet is
- * not supported, with the exception of "all V2 events regardless of
- * level 2 or 4".
- *
+ * Return: 0 in case of success, negative errno code otherwise.
*/
static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
struct hwtstamp_config *config)
{
- u32 tsync_tx_ctl = IGC_TSYNCTXCTL_ENABLED;
- u32 tsync_rx_ctl = IGC_TSYNCRXCTL_ENABLED;
- struct igc_hw *hw = &adapter->hw;
- u32 tsync_rx_cfg = 0;
- bool is_l4 = false;
- u32 regval;
-
/* reserved for future extensions */
if (config->flags)
return -EINVAL;
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
- tsync_tx_ctl = 0;
+ igc_ptp_disable_tx_timestamp(adapter);
+ break;
case HWTSTAMP_TX_ON:
+ igc_ptp_enable_tx_timestamp(adapter);
break;
default:
return -ERANGE;
@@ -322,18 +294,10 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- tsync_rx_ctl = 0;
+ igc_ptp_disable_rx_timestamp(adapter);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_cfg = IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
- is_l4 = true;
- break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_cfg = IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
- is_l4 = true;
- break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -343,99 +307,36 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_EVENT_V2;
- config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- is_l4 = true;
- break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
- tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL;
+ igc_ptp_enable_rx_timestamp(adapter);
config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
- /* fall through */
default:
- config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
- /* Per-packet timestamping only works if all packets are
- * timestamped, so enable timestamping in all packets as long
- * as one Rx filter was configured.
- */
- if (tsync_rx_ctl) {
- tsync_rx_ctl = IGC_TSYNCRXCTL_ENABLED;
- tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL;
- tsync_rx_ctl |= IGC_TSYNCRXCTL_RXSYNSIG;
- config->rx_filter = HWTSTAMP_FILTER_ALL;
- is_l4 = true;
-
- if (hw->mac.type == igc_i225) {
- regval = rd32(IGC_RXPBS);
- regval |= IGC_RXPBS_CFG_TS_EN;
- wr32(IGC_RXPBS, regval);
-
- /* FIXME: For now, only support retrieving RX
- * timestamps from timer 0
- */
- igc_ptp_enable_tstamp_all_rxqueues(adapter, 0);
- }
- }
-
- if (tsync_tx_ctl) {
- tsync_tx_ctl = IGC_TSYNCTXCTL_ENABLED;
- tsync_tx_ctl |= IGC_TSYNCTXCTL_TXSYNSIG;
- }
-
- /* enable/disable TX */
- regval = rd32(IGC_TSYNCTXCTL);
- regval &= ~IGC_TSYNCTXCTL_ENABLED;
- regval |= tsync_tx_ctl;
- wr32(IGC_TSYNCTXCTL, regval);
-
- /* enable/disable RX */
- regval = rd32(IGC_TSYNCRXCTL);
- regval &= ~(IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_MASK);
- regval |= tsync_rx_ctl;
- wr32(IGC_TSYNCRXCTL, regval);
-
- /* define which PTP packets are time stamped */
- wr32(IGC_TSYNCRXCFG, tsync_rx_cfg);
-
- /* L4 Queue Filter[3]: filter by destination port and protocol */
- if (is_l4) {
- u32 ftqf = (IPPROTO_UDP /* UDP */
- | IGC_FTQF_VF_BP /* VF not compared */
- | IGC_FTQF_1588_TIME_STAMP /* Enable Timestamp */
- | IGC_FTQF_MASK); /* mask all inputs */
- ftqf &= ~IGC_FTQF_MASK_PROTO_BP; /* enable protocol check */
-
- wr32(IGC_IMIR(3), htons(PTP_EV_PORT));
- wr32(IGC_IMIREXT(3),
- (IGC_IMIREXT_SIZE_BP | IGC_IMIREXT_CTRL_BP));
- wr32(IGC_FTQF(3), ftqf);
- } else {
- wr32(IGC_FTQF(3), IGC_FTQF_MASK);
- }
- wrfl();
+ return 0;
+}
- /* clear TX/RX time stamp registers, just to be sure */
- regval = rd32(IGC_TXSTMPL);
- regval = rd32(IGC_TXSTMPH);
- regval = rd32(IGC_RXSTMPL);
- regval = rd32(IGC_RXSTMPH);
+static void igc_ptp_tx_timeout(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
- return 0;
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ adapter->tx_hwtstamp_timeouts++;
+ clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+ /* Clear the tx valid bit in TSYNCTXCTL register to enable interrupt. */
+ rd32(IGC_TXSTMPH);
+ netdev_warn(adapter->netdev, "Tx timestamp timeout\n");
}
void igc_ptp_tx_hang(struct igc_adapter *adapter)
{
bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
IGC_PTP_TX_TIMEOUT);
- struct igc_hw *hw = &adapter->hw;
-
- if (!adapter->ptp_tx_skb)
- return;
if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
return;
@@ -446,15 +347,7 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter)
*/
if (timeout) {
cancel_work_sync(&adapter->ptp_tx_work);
- dev_kfree_skb_any(adapter->ptp_tx_skb);
- adapter->ptp_tx_skb = NULL;
- clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
- adapter->tx_hwtstamp_timeouts++;
- /* Clear the Tx valid bit in TSYNCTXCTL register to enable
- * interrupt
- */
- rd32(IGC_TXSTMPH);
- netdev_warn(adapter->netdev, "Clearing Tx timestamp hang\n");
+ igc_ptp_tx_timeout(adapter);
}
}
@@ -473,6 +366,9 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
struct igc_hw *hw = &adapter->hw;
u64 regval;
+ if (WARN_ON_ONCE(!skb))
+ return;
+
regval = rd32(IGC_TXSTMPL);
regval |= (u64)rd32(IGC_TXSTMPH) << 32;
igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
@@ -504,20 +400,12 @@ static void igc_ptp_tx_work(struct work_struct *work)
struct igc_hw *hw = &adapter->hw;
u32 tsynctxctl;
- if (!adapter->ptp_tx_skb)
+ if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
return;
if (time_is_before_jiffies(adapter->ptp_tx_start +
IGC_PTP_TX_TIMEOUT)) {
- dev_kfree_skb_any(adapter->ptp_tx_skb);
- adapter->ptp_tx_skb = NULL;
- clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
- adapter->tx_hwtstamp_timeouts++;
- /* Clear the tx valid bit in TSYNCTXCTL register to enable
- * interrupt
- */
- rd32(IGC_TXSTMPH);
- netdev_warn(adapter->netdev, "Clearing Tx timestamp hang\n");
+ igc_ptp_tx_timeout(adapter);
return;
}
@@ -634,11 +522,9 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
return;
cancel_work_sync(&adapter->ptp_tx_work);
- if (adapter->ptp_tx_skb) {
- dev_kfree_skb_any(adapter->ptp_tx_skb);
- adapter->ptp_tx_skb = NULL;
- clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
- }
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
}
/**
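
The igc_ptp.c changes above drop the per-register Rx timestamp path and reduce igc_ptp_set_timestamp_mode() to the new enable/disable helpers; the user-visible interface stays the standard SIOCSHWTSTAMP ioctl. A minimal user-space sketch of requesting the modes handled above (the interface name and error handling are ordinary Linux API usage, not taken from the patch):

/* Minimal SIOCSHWTSTAMP sketch: enable Tx timestamps and the "all packets"
 * Rx filter on an interface, matching the tx_type/rx_filter cases above.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* interface name assumed */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}

	/* On success the kernel writes back the filter it actually applied. */
	printf("rx_filter applied: %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}
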
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 232e82dec62e..b52dd9d737e8 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -58,16 +58,6 @@
#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */
-/* Interrupt Cause */
-#define IGC_ICRXPTC 0x04104 /* Rx Packet Timer Expire Count */
-#define IGC_ICRXATC 0x04108 /* Rx Absolute Timer Expire Count */
-#define IGC_ICTXPTC 0x0410C /* Tx Packet Timer Expire Count */
-#define IGC_ICTXATC 0x04110 /* Tx Absolute Timer Expire Count */
-#define IGC_ICTXQEC 0x04118 /* Tx Queue Empty Count */
-#define IGC_ICTXQMTC 0x0411C /* Tx Queue Min Threshold Count */
-#define IGC_ICRXDMTC 0x04120 /* Rx Descriptor Min Threshold Count */
-#define IGC_ICRXOC 0x04124 /* Receiver Overrun Count */
-
/* MSI-X Table Register Descriptions */
#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
@@ -181,13 +171,10 @@
#define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
-#define IGC_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
#define IGC_IAC 0x04100 /* Interrupt Assertion Count */
-#define IGC_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
-#define IGC_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
-#define IGC_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
-#define IGC_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
#define IGC_RPTHC 0x04104 /* Rx Packets To Host */
+#define IGC_TLPIC 0x04148 /* EEE Tx LPI Count */
+#define IGC_RLPIC 0x0414C /* EEE Rx LPI Count */
#define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */
#define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */
#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */
@@ -228,8 +215,6 @@
#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */
#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */
-#define IGC_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
-#define IGC_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
@@ -248,6 +233,17 @@
/* Wake Up packet memory */
#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
+/* Energy Efficient Ethernet "EEE" registers */
+#define IGC_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/
+#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */
+#define IGC_EEE_SU 0x0E34 /* EEE Setup */
+
+/* LTR registers */
+#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */
+#define IGC_DMACR 0x02508 /* DMA Coalescing Control Register */
+#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */
+#define IGC_LTRMAXV 0x5BB4 /* LTR Maximum Value */
+
/* forward declaration */
struct igc_hw;
u32 igc_rd32(struct igc_hw *hw, u32 reg);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index 681d44cc9784..81ac39576803 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -163,7 +163,6 @@ enum ixgb_state_t {
void ixgb_check_options(struct ixgb_adapter *adapter);
void ixgb_set_ethtool_ops(struct net_device *netdev);
extern char ixgb_driver_name[];
-extern const char ixgb_driver_version[];
void ixgb_set_speed_duplex(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index c65eb1afc8fb..582099a5ad41 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -458,8 +458,6 @@ ixgb_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, ixgb_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, ixgb_driver_version,
- sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index b64e91ea3465..46829cfd54df 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -9,9 +9,6 @@
char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
-#define DRIVERNAPI "-NAPI"
-#define DRV_VERSION "1.0.135-k2" DRIVERNAPI
-const char ixgb_driver_version[] = DRV_VERSION;
static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
#define IXGB_CB_LENGTH 256
@@ -103,7 +100,6 @@ static struct pci_driver ixgb_driver = {
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
@@ -120,7 +116,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static int __init
ixgb_init_module(void)
{
- pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
+ pr_info("%s\n", ixgb_driver_string);
pr_info("%s\n", ixgb_copyright);
return pci_register_driver(&ixgb_driver);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 5ddfc83a1e46..1e8a809233a0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -588,11 +588,9 @@ struct ixgbe_adapter {
#define IXGBE_FLAG_FCOE_ENABLED BIT(21)
#define IXGBE_FLAG_SRIOV_CAPABLE BIT(22)
#define IXGBE_FLAG_SRIOV_ENABLED BIT(23)
-#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24)
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26)
#define IXGBE_FLAG_DCB_CAPABLE BIT(27)
-#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE BIT(28)
u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE BIT(0)
@@ -606,7 +604,6 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
-#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
#define IXGBE_FLAG2_EEE_CAPABLE BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
@@ -846,7 +843,6 @@ extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif
extern char ixgbe_driver_name[];
-extern const char ixgbe_driver_version[];
#ifdef IXGBE_FCOE
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index eee277c1bedf..95c92fe890a1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1098,7 +1098,7 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
/* Setup the last four at 48KB...don't re-init i */
rxpktsize = IXGBE_RXPBSIZE_48KB;
- /* Fall Through */
+ fallthrough;
case PBA_STRATEGY_EQUAL:
default:
/* Divide the remaining Rx packet buffer evenly among the TCs */
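
The comment-style fall-through markers replaced throughout this series become the fallthrough pseudo-keyword, a statement-like macro the kernel wraps around the compiler's fallthrough attribute so that -Wimplicit-fallthrough still sees an explicit annotation. A rough stand-alone illustration follows; the #define approximates the kernel's definition rather than reproducing it exactly:

/* Stand-alone approximation of the kernel's fallthrough marker. */
#ifndef __has_attribute
# define __has_attribute(x) 0
#endif

#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif

static int classify(int code)
{
	switch (code) {
	case 0:
		/* setup shared between codes 0 and 1 */
		fallthrough;
	case 1:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	return classify(0) == 1 ? 0 : 1;
}
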
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 109f8de5a1c2..8d3798a32f0e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1568,7 +1568,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
case 0x0000:
/* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
- /* fall through */
+ fallthrough;
case 0x0FFF:
/* mask VLAN priority */
fdirm |= IXGBE_FDIRM_VLANP;
@@ -1576,7 +1576,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
case 0xE000:
/* mask VLAN ID only */
fdirm |= IXGBE_FDIRM_VLANID;
- /* fall through */
+ fallthrough;
case 0xEFFF:
/* no VLAN fields masked */
break;
@@ -1589,7 +1589,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
case 0x0000:
/* Mask Flex Bytes */
fdirm |= IXGBE_FDIRM_FLEX;
- /* fall through */
+ fallthrough;
case 0xFFFF:
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 17357a12cbdc..62ddb452f862 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -145,7 +145,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
if (ret_val)
return ret_val;
- /* fall through - only backplane uses autoc */
+ fallthrough; /* only backplane uses autoc */
case ixgbe_media_type_fiber:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -3533,7 +3533,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
for (; i < (num_pb / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
- /* fall through - configure remaining packet buffers */
+ fallthrough; /* configure remaining packet buffers */
case (PBA_STRATEGY_EQUAL):
/* Divide the remaining Rx packet buffer evenly among the TCs */
rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index c6bf0a50ee63..71ec908266a6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -142,32 +142,71 @@ static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
-/* currently supported speeds for 10G */
-#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
- SUPPORTED_10000baseKX4_Full | \
- SUPPORTED_10000baseKR_Full)
-
#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
-static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
+static void ixgbe_set_supported_10gtypes(struct ixgbe_hw *hw,
+ struct ethtool_link_ksettings *cmd)
+{
+ if (!ixgbe_isbackplane(hw->phy.media_type)) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10000baseT_Full);
+ return;
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598:
+ case IXGBE_DEV_ID_82599_KX4:
+ case IXGBE_DEV_ID_82599_KX4_MEZZ:
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ ethtool_link_ksettings_add_link_mode
+ (cmd, supported, 10000baseKX4_Full);
+ break;
+ case IXGBE_DEV_ID_82598_BX:
+ case IXGBE_DEV_ID_82599_KR:
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ ethtool_link_ksettings_add_link_mode
+ (cmd, supported, 10000baseKR_Full);
+ break;
+ default:
+ ethtool_link_ksettings_add_link_mode
+ (cmd, supported, 10000baseKX4_Full);
+ ethtool_link_ksettings_add_link_mode
+ (cmd, supported, 10000baseKR_Full);
+ break;
+ }
+}
+
+static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw,
+ struct ethtool_link_ksettings *cmd)
{
- if (!ixgbe_isbackplane(hw->phy.media_type))
- return SUPPORTED_10000baseT_Full;
+ if (!ixgbe_isbackplane(hw->phy.media_type)) {
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10000baseT_Full);
+ return;
+ }
switch (hw->device_id) {
case IXGBE_DEV_ID_82598:
case IXGBE_DEV_ID_82599_KX4:
case IXGBE_DEV_ID_82599_KX4_MEZZ:
case IXGBE_DEV_ID_X550EM_X_KX4:
- return SUPPORTED_10000baseKX4_Full;
+ ethtool_link_ksettings_add_link_mode
+ (cmd, advertising, 10000baseKX4_Full);
+ break;
case IXGBE_DEV_ID_82598_BX:
case IXGBE_DEV_ID_82599_KR:
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_XFI:
- return SUPPORTED_10000baseKR_Full;
+ ethtool_link_ksettings_add_link_mode
+ (cmd, advertising, 10000baseKR_Full);
+ break;
default:
- return SUPPORTED_10000baseKX4_Full |
- SUPPORTED_10000baseKR_Full;
+ ethtool_link_ksettings_add_link_mode
+ (cmd, advertising, 10000baseKX4_Full);
+ ethtool_link_ksettings_add_link_mode
+ (cmd, advertising, 10000baseKR_Full);
+ break;
}
}
@@ -178,52 +217,88 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
struct ixgbe_hw *hw = &adapter->hw;
ixgbe_link_speed supported_link;
bool autoneg = false;
- u32 supported, advertising;
- ethtool_convert_link_mode_to_legacy_u32(&supported,
- cmd->link_modes.supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
/* set the supported link speeds */
- if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
- supported |= ixgbe_get_supported_10gtypes(hw);
- if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
- supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
- SUPPORTED_1000baseKX_Full :
- SUPPORTED_1000baseT_Full;
- if (supported_link & IXGBE_LINK_SPEED_100_FULL)
- supported |= SUPPORTED_100baseT_Full;
- if (supported_link & IXGBE_LINK_SPEED_10_FULL)
- supported |= SUPPORTED_10baseT_Full;
-
- /* default advertised speed if phy.autoneg_advertised isn't set */
- advertising = supported;
+ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) {
+ ixgbe_set_supported_10gtypes(hw, cmd);
+ ixgbe_set_advertising_10gtypes(hw, cmd);
+ }
+ if (supported_link & IXGBE_LINK_SPEED_5GB_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 5000baseT_Full);
+
+ if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 2500baseT_Full);
+
+ if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) {
+ if (ixgbe_isbackplane(hw->phy.media_type)) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 1000baseKX_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseKX_Full);
+ } else {
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseT_Full);
+ }
+ }
+ if (supported_link & IXGBE_LINK_SPEED_100_FULL) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 100baseT_Full);
+ }
+ if (supported_link & IXGBE_LINK_SPEED_10_FULL) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Full);
+ }
+
/* set the advertised speeds */
if (hw->phy.autoneg_advertised) {
- advertising = 0;
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
- advertising |= ADVERTISED_10baseT_Full;
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Full);
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
- advertising |= ADVERTISED_100baseT_Full;
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 100baseT_Full);
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- advertising |= supported & ADVRTSD_MSK_10G;
+ ixgbe_set_advertising_10gtypes(hw, cmd);
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
- if (supported & SUPPORTED_1000baseKX_Full)
- advertising |= ADVERTISED_1000baseKX_Full;
+ if (ethtool_link_ksettings_test_link_mode
+ (cmd, supported, 1000baseKX_Full))
+ ethtool_link_ksettings_add_link_mode
+ (cmd, advertising, 1000baseKX_Full);
else
- advertising |= ADVERTISED_1000baseT_Full;
+ ethtool_link_ksettings_add_link_mode
+ (cmd, advertising, 1000baseT_Full);
}
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 5000baseT_Full);
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 2500baseT_Full);
} else {
if (hw->phy.multispeed_fiber && !autoneg) {
if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
- advertising = ADVERTISED_10000baseT_Full;
+ ethtool_link_ksettings_add_link_mode
+ (cmd, advertising, 10000baseT_Full);
}
}
if (autoneg) {
- supported |= SUPPORTED_Autoneg;
- advertising |= ADVERTISED_Autoneg;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
cmd->base.autoneg = AUTONEG_ENABLE;
} else
cmd->base.autoneg = AUTONEG_DISABLE;
@@ -235,13 +310,13 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
case ixgbe_phy_x550em_ext_t:
case ixgbe_phy_fw:
case ixgbe_phy_cu_unknown:
- supported |= SUPPORTED_TP;
- advertising |= ADVERTISED_TP;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
cmd->base.port = PORT_TP;
break;
case ixgbe_phy_qt:
- supported |= SUPPORTED_FIBRE;
- advertising |= ADVERTISED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
cmd->base.port = PORT_FIBRE;
break;
case ixgbe_phy_nl:
@@ -260,8 +335,10 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
case ixgbe_sfp_type_da_cu:
case ixgbe_sfp_type_da_cu_core0:
case ixgbe_sfp_type_da_cu_core1:
- supported |= SUPPORTED_FIBRE;
- advertising |= ADVERTISED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ FIBRE);
cmd->base.port = PORT_DA;
break;
case ixgbe_sfp_type_sr:
@@ -272,61 +349,76 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
case ixgbe_sfp_type_1g_sx_core1:
case ixgbe_sfp_type_1g_lx_core0:
case ixgbe_sfp_type_1g_lx_core1:
- supported |= SUPPORTED_FIBRE;
- advertising |= ADVERTISED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ FIBRE);
cmd->base.port = PORT_FIBRE;
break;
case ixgbe_sfp_type_not_present:
- supported |= SUPPORTED_FIBRE;
- advertising |= ADVERTISED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ FIBRE);
cmd->base.port = PORT_NONE;
break;
case ixgbe_sfp_type_1g_cu_core0:
case ixgbe_sfp_type_1g_cu_core1:
- supported |= SUPPORTED_TP;
- advertising |= ADVERTISED_TP;
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ TP);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ TP);
cmd->base.port = PORT_TP;
break;
case ixgbe_sfp_type_unknown:
default:
- supported |= SUPPORTED_FIBRE;
- advertising |= ADVERTISED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ FIBRE);
cmd->base.port = PORT_OTHER;
break;
}
break;
case ixgbe_phy_xaui:
- supported |= SUPPORTED_FIBRE;
- advertising |= ADVERTISED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ FIBRE);
cmd->base.port = PORT_NONE;
break;
case ixgbe_phy_unknown:
case ixgbe_phy_generic:
case ixgbe_phy_sfp_unsupported:
default:
- supported |= SUPPORTED_FIBRE;
- advertising |= ADVERTISED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ FIBRE);
cmd->base.port = PORT_OTHER;
break;
}
/* Indicate pause support */
- supported |= SUPPORTED_Pause;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
switch (hw->fc.requested_mode) {
case ixgbe_fc_full:
- advertising |= ADVERTISED_Pause;
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
break;
case ixgbe_fc_rx_pause:
- advertising |= ADVERTISED_Pause |
- ADVERTISED_Asym_Pause;
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
break;
case ixgbe_fc_tx_pause:
- advertising |= ADVERTISED_Asym_Pause;
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
break;
default:
- advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause);
+ ethtool_link_ksettings_del_link_mode(cmd, advertising,
+ Asym_Pause);
}
if (netif_carrier_ok(netdev)) {
@@ -358,11 +450,6 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
-
return 0;
}
@@ -373,12 +460,6 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
struct ixgbe_hw *hw = &adapter->hw;
u32 advertised, old;
s32 err = 0;
- u32 supported, advertising;
-
- ethtool_convert_link_mode_to_legacy_u32(&supported,
- cmd->link_modes.supported);
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
(hw->phy.multispeed_fiber)) {
@@ -386,29 +467,41 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
* this function does not support duplex forcing, but can
* limit the advertising of the adapter to the specified speed
*/
- if (advertising & ~supported)
+ if (!bitmap_subset(cmd->link_modes.advertising,
+ cmd->link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
return -EINVAL;
/* only allow one speed at a time if no autoneg */
if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
- if (advertising ==
- (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full))
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseT_Full) &&
+ ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 1000baseT_Full))
return -EINVAL;
}
old = hw->phy.autoneg_advertised;
advertised = 0;
- if (advertising & ADVERTISED_10000baseT_Full)
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseT_Full))
advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
- if (advertising & ADVERTISED_1000baseT_Full)
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 5000baseT_Full))
+ advertised |= IXGBE_LINK_SPEED_5GB_FULL;
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 2500baseT_Full))
+ advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 1000baseT_Full))
advertised |= IXGBE_LINK_SPEED_1GB_FULL;
- if (advertising & ADVERTISED_100baseT_Full)
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100baseT_Full))
advertised |= IXGBE_LINK_SPEED_100_FULL;
- if (advertising & ADVERTISED_10baseT_Full)
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10baseT_Full))
advertised |= IXGBE_LINK_SPEED_10_FULL;
if (old == advertised)
@@ -429,7 +522,8 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
u32 speed = cmd->base.speed;
if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
- (advertising != ADVERTISED_10000baseT_Full) ||
+ (!ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseT_Full)) ||
(speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
return -EINVAL;
}
@@ -1004,8 +1098,6 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, ixgbe_driver_version,
- sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, adapter->eeprom_id,
sizeof(drvinfo->fw_version));
@@ -1859,8 +1951,8 @@ static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
memset(skb->data, 0xFF, frame_size);
frame_size >>= 1;
memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
- memset(&skb->data[frame_size + 10], 0xBE, 1);
- memset(&skb->data[frame_size + 12], 0xAF, 1);
+ skb->data[frame_size + 10] = 0xBE;
+ skb->data[frame_size + 12] = 0xAF;
}
static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
@@ -2086,7 +2178,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__IXGBE_TESTING,
&adapter->state);
- goto skip_ol_tests;
+ return;
}
}
}
@@ -2158,9 +2250,6 @@ skip_loopback:
clear_bit(__IXGBE_TESTING, &adapter->state);
}
-
-skip_ol_tests:
- msleep_interruptible(4 * 1000);
}
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
@@ -2509,11 +2598,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fallthrough */
+ fallthrough;
case UDP_V4_FLOW:
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fallthrough */
+ fallthrough;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2523,11 +2612,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
break;
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fallthrough */
+ fallthrough;
case UDP_V6_FLOW:
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fallthrough */
+ fallthrough;
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -2659,7 +2748,7 @@ static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
break;
}
- /* fall through */
+ fallthrough;
default:
return 0;
}
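
The ethtool conversion above is what makes the 2.5G and 5G modes representable: 2500baseT_Full and 5000baseT_Full exist only as link-mode bits, with no legacy SUPPORTED_*/ADVERTISED_* flag. A minimal sketch of the helper pattern; the function is illustrative, the macros are the same ones used in the hunks above:

/* Sketch of the link_ksettings bitmap helpers; the function and the chosen
 * modes are illustrative only.
 */
static void example_fill_modes(struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	/* 2500baseT_Full and 5000baseT_Full have no legacy u32 equivalent */
	ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 5000baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	if (ethtool_link_ksettings_test_link_mode(cmd, supported, Autoneg))
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
}
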
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index ec7a11d13fdc..e67b1a59ecb7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -5,6 +5,7 @@
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
+#include <generated/utsrelease.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
@@ -443,7 +444,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
ddp->err = (__force u32)ddp_err;
ddp->sgl = NULL;
ddp->sgc = 0;
- /* fall through */
+ fallthrough;
/* if DDP length is present pass it through to ULD */
case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
/* update length of DDPed data */
@@ -1001,7 +1002,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
sizeof(info->driver_version),
"%s v%s",
ixgbe_driver_name,
- ixgbe_driver_version);
+ UTS_RELEASE);
/* Firmware Version */
strlcpy(info->firmware_version, adapter->eeprom_id,
sizeof(info->firmware_version));
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 113f6087c7c9..6516980965a2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -427,7 +427,7 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
- struct net_device *dev = xs->xso.dev;
+ struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -477,7 +477,7 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
**/
static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.dev;
+ struct net_device *dev = xs->xso.real_dev;
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
u32 mfval, manc, reg;
@@ -560,7 +560,7 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
**/
static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.dev;
+ struct net_device *dev = xs->xso.real_dev;
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct ixgbe_hw *hw = &adapter->hw;
@@ -745,7 +745,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
**/
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.dev;
+ struct net_device *dev = xs->xso.real_dev;
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct ixgbe_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index fd9f5d41b594..2e35c5706cf1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -921,7 +921,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
ring->queue_index = txr_idx;
/* assign ring to adapter */
- adapter->tx_ring[txr_idx] = ring;
+ WRITE_ONCE(adapter->tx_ring[txr_idx], ring);
/* update count and index */
txr_count--;
@@ -948,7 +948,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
set_ring_xdp(ring);
/* assign ring to adapter */
- adapter->xdp_ring[xdp_idx] = ring;
+ WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
/* update count and index */
xdp_count--;
@@ -991,7 +991,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
ring->queue_index = rxr_idx;
/* assign ring to adapter */
- adapter->rx_ring[rxr_idx] = ring;
+ WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);
/* update count and index */
rxr_count--;
@@ -1020,13 +1020,13 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
ixgbe_for_each_ring(ring, q_vector->tx) {
if (ring_is_xdp(ring))
- adapter->xdp_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
else
- adapter->tx_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
}
ixgbe_for_each_ring(ring, q_vector->rx)
- adapter->rx_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
adapter->q_vector[v_idx] = NULL;
napi_hash_del(&q_vector->napi);
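
The WRITE_ONCE() annotations above pair with READ_ONCE() on the stats side (see the ixgbe_update_stats() hunks further down): the stats path can walk the ring arrays while ixgbe_free_q_vector() is clearing the slots, so both sides need single-copy access and the reader needs a NULL check. A condensed reader-side sketch; the loop body is trimmed, only the access pattern matters:

/* Condensed reader-side sketch of the READ_ONCE()/WRITE_ONCE() pairing. */
static u64 example_sum_tx_restarts(struct ixgbe_adapter *adapter)
{
	u64 restart_queue = 0;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		/* the writer may set this slot to NULL concurrently */
		struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);

		if (!tx_ring)
			continue;
		restart_queue += tx_ring->tx_stats.restart_queue;
	}
	return restart_queue;
}
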
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index f162b8b8f345..2f8a4cfc5fa1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -28,6 +28,7 @@
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <linux/numa.h>
+#include <generated/utsrelease.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
@@ -56,8 +57,6 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "5.1.0-k"
-const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2016 Intel Corporation.";
@@ -165,7 +164,6 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
static struct workqueue_struct *ixgbe_wq;
@@ -1397,7 +1395,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
IXGBE_DCA_CTRL_DCA_MODE_CB2);
break;
}
- /* fall through - DCA is disabled. */
+ fallthrough; /* DCA is disabled. */
case DCA_PROVIDER_REMOVE:
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
dca_remove_requester(dev);
@@ -2231,10 +2229,10 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fallthrough */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fallthrough -- handle aborts by dropping packet */
+ fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
result = IXGBE_XDP_CONSUMED;
break;
@@ -3009,7 +3007,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_82599EB:
mask |= IXGBE_EIMS_GPI_SDP1(hw);
mask |= IXGBE_EIMS_GPI_SDP2(hw);
- /* fall through */
+ fallthrough;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
@@ -3315,7 +3313,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
ixgbe_check_sfp_event(adapter, eicr);
- /* Fall through */
+ fallthrough;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
@@ -4337,7 +4335,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
case ixgbe_mac_x550em_a:
if (adapter->num_vfs)
rdrxctl |= IXGBE_RDRXCTL_PSP;
- /* fall through */
+ fallthrough;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
/* Disable RSC for ACK packets */
@@ -4996,24 +4994,41 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
napi_disable(&adapter->q_vector[q_idx]->napi);
}
-static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
+static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- u32 vxlanctrl;
+ struct udp_tunnel_info ti;
- if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
- IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
- return;
+ udp_tunnel_nic_get_port(dev, table, 0, &ti);
+ if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
+ adapter->vxlan_port = ti.port;
+ else
+ adapter->geneve_port = ti.port;
- vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
- IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL,
+ ntohs(adapter->vxlan_port) |
+ ntohs(adapter->geneve_port) <<
+ IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT);
+ return 0;
+}
- if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
- adapter->vxlan_port = 0;
+static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550 = {
+ .sync_table = ixgbe_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ },
+};
- if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
- adapter->geneve_port = 0;
-}
+static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550em_a = {
+ .sync_table = ixgbe_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+};
#ifdef CONFIG_IXGBE_DCB
/**
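
With the udp_tunnel_nic conversion above, the driver only supplies a sync callback plus per-hardware table descriptions; the core then calls ixgbe_udp_tunnel_sync() as ports come and go, or after udp_tunnel_nic_reset_ntf() as used in ixgbe_open() below. A sketch of how such an info struct is typically attached at probe time; the exact spot in ixgbe_probe() is not part of this hunk, so treat the function as an assumption:

/* Hypothetical probe-time hookup for the tables defined above; the
 * netdev->udp_tunnel_nic_info pointer is the standard core interface.
 */
static void example_register_udp_tunnels(struct net_device *netdev,
					 enum ixgbe_mac_type mac_type)
{
	if (mac_type == ixgbe_mac_x550em_a)
		netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a;
	else
		netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
}
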
@@ -5503,9 +5518,13 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
return ret;
speed = hw->phy.autoneg_advertised;
- if ((!speed) && (hw->mac.ops.get_link_capabilities))
+ if (!speed && hw->mac.ops.get_link_capabilities) {
ret = hw->mac.ops.get_link_capabilities(hw, &speed,
&autoneg);
+ speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
+ IXGBE_LINK_SPEED_2_5GB_FULL);
+ }
+
if (ret)
return ret;
@@ -5887,7 +5906,7 @@ dma_engine_disable:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
- /* fall through */
+ fallthrough;
default:
break;
}
@@ -6330,7 +6349,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
case ixgbe_mac_x550em_a:
- adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
@@ -6339,7 +6357,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
default:
break;
}
- /* fall through */
+ fallthrough;
case ixgbe_mac_X550EM_x:
#ifdef CONFIG_IXGBE_DCB
adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
@@ -6350,14 +6368,13 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
adapter->fcoe.up = 0;
#endif /* IXGBE_DCB */
#endif /* IXGBE_FCOE */
- /* Fall Through */
+ fallthrough;
case ixgbe_mac_X550:
if (hw->mac.type == ixgbe_mac_X550)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
#ifdef CONFIG_IXGBE_DCA
adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
#endif
- adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
break;
default:
break;
@@ -6796,8 +6813,7 @@ int ixgbe_open(struct net_device *netdev)
ixgbe_up_complete(adapter);
- ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
- udp_tunnel_get_rx_info(netdev);
+ udp_tunnel_nic_reset_ntf(netdev);
return 0;
@@ -6861,32 +6877,20 @@ int ixgbe_close(struct net_device *netdev)
return 0;
}
-#ifdef CONFIG_PM
-static int ixgbe_resume(struct pci_dev *pdev)
+static int __maybe_unused ixgbe_resume(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
u32 err;
adapter->hw.hw_addr = adapter->io_addr;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- /*
- * pci_restore_state clears dev->state_saved so call
- * pci_save_state to restore it.
- */
- pci_save_state(pdev);
- err = pci_enable_device_mem(pdev);
- if (err) {
- e_dev_err("Cannot enable PCI device from suspend\n");
- return err;
- }
smp_mb__before_atomic();
clear_bit(__IXGBE_DISABLED, &adapter->state);
pci_set_master(pdev);
- pci_wake_from_d3(pdev, false);
+ device_wakeup_disable(dev_d);
ixgbe_reset(adapter);
@@ -6904,7 +6908,6 @@ static int ixgbe_resume(struct pci_dev *pdev)
return err;
}
-#endif /* CONFIG_PM */
static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
@@ -6913,9 +6916,6 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
struct ixgbe_hw *hw = &adapter->hw;
u32 ctrl;
u32 wufc = adapter->wol;
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
rtnl_lock();
netif_device_detach(netdev);
@@ -6926,12 +6926,6 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
ixgbe_clear_interrupt_scheme(adapter);
rtnl_unlock();
-#ifdef CONFIG_PM
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
-#endif
if (hw->mac.ops.stop_link_on_d3)
hw->mac.ops.stop_link_on_d3(hw);
@@ -6986,26 +6980,18 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
return 0;
}
-#ifdef CONFIG_PM
-static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused ixgbe_suspend(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
int retval;
bool wake;
retval = __ixgbe_shutdown(pdev, &wake);
- if (retval)
- return retval;
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
+ device_set_wakeup_enable(dev_d, wake);
- return 0;
+ return retval;
}
-#endif /* CONFIG_PM */
static void ixgbe_shutdown(struct pci_dev *pdev)
{
@@ -7051,7 +7037,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+ struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
+
+ if (!rx_ring)
+ continue;
non_eop_descs += rx_ring->rx_stats.non_eop_descs;
alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
@@ -7072,15 +7061,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
packets = 0;
/* gather some stats to the adapter struct that are per queue */
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
+
+ if (!tx_ring)
+ continue;
restart_queue += tx_ring->tx_stats.restart_queue;
tx_busy += tx_ring->tx_stats.tx_busy;
bytes += tx_ring->stats.bytes;
packets += tx_ring->stats.packets;
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
- struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
+ struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
+ if (!xdp_ring)
+ continue;
restart_queue += xdp_ring->tx_stats.restart_queue;
tx_busy += xdp_ring->tx_stats.tx_busy;
bytes += xdp_ring->stats.bytes;
@@ -7162,7 +7156,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
- /* fall through */
+ fallthrough;
case ixgbe_mac_82599EB:
for (i = 0; i < 16; i++)
adapter->hw_rx_no_dma_resources +=
@@ -7911,12 +7905,6 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
return;
}
- if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
- rtnl_lock();
- adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
- udp_tunnel_get_rx_info(adapter->netdev);
- rtnl_unlock();
- }
ixgbe_reset_subtask(adapter);
ixgbe_phy_interrupt_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
@@ -8071,7 +8059,7 @@ csum_failed:
switch (skb->csum_offset) {
case offsetof(struct tcphdr, check):
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- /* fall through */
+ fallthrough;
case offsetof(struct udphdr, check):
break;
case offsetof(struct sctphdr, checksum):
@@ -8083,7 +8071,7 @@ csum_failed:
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
}
- /* fall through */
+ fallthrough;
default:
skb_checksum_help(skb);
goto csum_failed;
@@ -8526,7 +8514,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
break;
- /* fall through */
+ fallthrough;
default:
return netdev_pick_tx(dev, skb, sb_dev);
}
@@ -8860,7 +8848,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
case SIOCGMIIPHY:
if (!adapter->hw.phy.ops.read_reg)
return -EOPNOTSUPP;
- /* fall through */
+ fallthrough;
default:
return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
@@ -9774,26 +9762,6 @@ static int ixgbe_set_features(struct net_device *netdev,
netdev->features = features;
- if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
- if (features & NETIF_F_RXCSUM) {
- adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
- } else {
- u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
-
- ixgbe_clear_udp_tunnel_port(adapter, port_mask);
- }
- }
-
- if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
- if (features & NETIF_F_RXCSUM) {
- adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
- } else {
- u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
-
- ixgbe_clear_udp_tunnel_port(adapter, port_mask);
- }
- }
-
if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
ixgbe_reset_l2fw_offload(adapter);
else if (need_reset)
@@ -9805,118 +9773,6 @@ static int ixgbe_set_features(struct net_device *netdev,
return 1;
}
-/**
- * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
- * @dev: The port's netdev
- * @ti: Tunnel endpoint information
- **/
-static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
- struct udp_tunnel_info *ti)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- struct ixgbe_hw *hw = &adapter->hw;
- __be16 port = ti->port;
- u32 port_shift = 0;
- u32 reg;
-
- if (ti->sa_family != AF_INET)
- return;
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
- return;
-
- if (adapter->vxlan_port == port)
- return;
-
- if (adapter->vxlan_port) {
- netdev_info(dev,
- "VXLAN port %d set, not adding port %d\n",
- ntohs(adapter->vxlan_port),
- ntohs(port));
- return;
- }
-
- adapter->vxlan_port = port;
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
- return;
-
- if (adapter->geneve_port == port)
- return;
-
- if (adapter->geneve_port) {
- netdev_info(dev,
- "GENEVE port %d set, not adding port %d\n",
- ntohs(adapter->geneve_port),
- ntohs(port));
- return;
- }
-
- port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
- adapter->geneve_port = port;
- break;
- default:
- return;
- }
-
- reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
- IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
-}
-
-/**
- * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports
- * @dev: The port's netdev
- * @ti: Tunnel endpoint information
- **/
-static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
- struct udp_tunnel_info *ti)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- u32 port_mask;
-
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
- ti->type != UDP_TUNNEL_TYPE_GENEVE)
- return;
-
- if (ti->sa_family != AF_INET)
- return;
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
- return;
-
- if (adapter->vxlan_port != ti->port) {
- netdev_info(dev, "VXLAN port %d not found\n",
- ntohs(ti->port));
- return;
- }
-
- port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
- return;
-
- if (adapter->geneve_port != ti->port) {
- netdev_info(dev, "GENEVE port %d not found\n",
- ntohs(ti->port));
- return;
- }
-
- port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
- break;
- default:
- return;
- }
-
- ixgbe_clear_udp_tunnel_port(adapter, port_mask);
- adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
-}
-
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
@@ -10304,10 +10160,6 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return ixgbe_xdp_setup(dev, xdp->prog);
- case XDP_QUERY_PROG:
- xdp->prog_id = adapter->xdp_prog ?
- adapter->xdp_prog->aux->id : 0;
- return 0;
case XDP_SETUP_XSK_UMEM:
return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
xdp->xsk.queue_id);
@@ -10406,8 +10258,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
.ndo_dfwd_add_station = ixgbe_fwd_add,
.ndo_dfwd_del_station = ixgbe_fwd_del,
- .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
- .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = ixgbe_features_check,
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
@@ -10650,7 +10502,7 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
/* only support first port */
if (hw->bus.func != 0)
break;
- /* fall through */
+ fallthrough;
case IXGBE_SUBDEV_ID_82599_SP_560FLR:
case IXGBE_SUBDEV_ID_82599_SFP:
case IXGBE_SUBDEV_ID_82599_RNDC:
@@ -10852,6 +10704,18 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
+ break;
+ case ixgbe_mac_x550em_a:
+ netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a;
+ break;
+ default:
+ break;
+ }
+
/* Make sure the SWFW semaphore is in a valid state */
if (hw->mac.ops.init_swfw_sync)
hw->mac.ops.init_swfw_sync(hw);
@@ -11146,8 +11010,8 @@ skip_sriov:
*/
if (hw->mac.ops.set_fw_drv_ver)
hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
- sizeof(ixgbe_driver_version) - 1,
- ixgbe_driver_version);
+ sizeof(UTS_RELEASE) - 1,
+ UTS_RELEASE);
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
@@ -11167,10 +11031,14 @@ skip_sriov:
IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
true);
- ixgbe_mii_bus_init(hw);
+ err = ixgbe_mii_bus_init(hw);
+ if (err)
+ goto err_netdev;
return 0;
+err_netdev:
+ unregister_netdev(netdev);
err_register:
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
@@ -11481,16 +11349,15 @@ static const struct pci_error_handlers ixgbe_err_handler = {
.resume = ixgbe_io_resume,
};
+static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
+
static struct pci_driver ixgbe_driver = {
- .name = ixgbe_driver_name,
- .id_table = ixgbe_pci_tbl,
- .probe = ixgbe_probe,
- .remove = ixgbe_remove,
-#ifdef CONFIG_PM
- .suspend = ixgbe_suspend,
- .resume = ixgbe_resume,
-#endif
- .shutdown = ixgbe_shutdown,
+ .name = ixgbe_driver_name,
+ .id_table = ixgbe_pci_tbl,
+ .probe = ixgbe_probe,
+ .remove = ixgbe_remove,
+ .driver.pm = &ixgbe_pm_ops,
+ .shutdown = ixgbe_shutdown,
.sriov_configure = ixgbe_pci_sriov_configure,
.err_handler = &ixgbe_err_handler
};
@@ -11504,7 +11371,7 @@ static struct pci_driver ixgbe_driver = {
static int __init ixgbe_init_module(void)
{
int ret;
- pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
+ pr_info("%s\n", ixgbe_driver_string);
pr_info("%s\n", ixgbe_copyright);
ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
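The ixgbe_main.c hunks above drop the driver-private VXLAN/GENEVE port bookkeeping (ixgbe_add_udp_tunnel_port(), ixgbe_del_udp_tunnel_port() and the IXGBE_FLAG2_UDP_TUN_REREG_NEEDED re-registration in the service task) in favour of the core udp_tunnel_nic infrastructure: the driver points netdev->udp_tunnel_nic_info at a table describing what the MAC can offload, wires .ndo_udp_tunnel_add/del to udp_tunnel_nic_add_port()/udp_tunnel_nic_del_port(), and calls udp_tunnel_nic_reset_ntf() after a reset so the core replays the ports. The real ixgbe_udp_tunnels_x550* tables are defined elsewhere in the patch; the sketch below only illustrates the shape of such a table, and every "sketch_" identifier is hypothetical.

#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

/* Hypothetical callback: the core hands the driver one (table, entry) slot
 * to program; for ixgbe-class hardware this would land in VXLANCTRL. */
static int sketch_set_port(struct net_device *dev, unsigned int table,
			   unsigned int entry, struct udp_tunnel_info *ti)
{
	/* write ntohs(ti->port) into the slot's register field */
	return 0;
}

static int sketch_unset_port(struct net_device *dev, unsigned int table,
			     unsigned int entry, struct udp_tunnel_info *ti)
{
	/* clear the slot's register field */
	return 0;
}

/* One VXLAN slot, IPv4 only - roughly the X550-class offload capability. */
static const struct udp_tunnel_nic_info sketch_udp_tunnels = {
	.set_port	= sketch_set_port,
	.unset_port	= sketch_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
};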
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 2fb97967961c..7980d7265e10 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -905,7 +905,6 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
struct pci_dev *pdev = adapter->pdev;
struct device *dev = &adapter->netdev->dev;
struct mii_bus *bus;
- int err = -ENODEV;
bus = devm_mdiobus_alloc(dev);
if (!bus)
@@ -923,7 +922,7 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
if (!ixgbe_x550em_a_has_mii(hw))
- goto ixgbe_no_mii_bus;
+ return -ENODEV;
bus->read = &ixgbe_x550em_a_mii_bus_read;
bus->write = &ixgbe_x550em_a_mii_bus_write;
break;
@@ -948,15 +947,8 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
*/
hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
- err = mdiobus_register(bus);
- if (!err) {
- adapter->mii_bus = bus;
- return 0;
- }
-
-ixgbe_no_mii_bus:
- devm_mdiobus_free(dev, bus);
- return err;
+ adapter->mii_bus = bus;
+ return mdiobus_register(bus);
}
/**
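In ixgbe_mii_bus_init() the explicit devm_mdiobus_free() error path goes away: the bus is allocated with devm_mdiobus_alloc(), so its memory is tied to the device lifetime, and the function can simply return the mdiobus_register() status, which ixgbe_probe() now checks and unwinds through the new err_netdev label. A generic sketch of the devm-managed pattern, with hypothetical names and stub accessors:

#include <linux/device.h>
#include <linux/phy.h>

struct sketch_priv {
	struct mii_bus *mii_bus;
};

static int sketch_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	return 0xffff;	/* stub: would read the PHY register here */
}

static int sketch_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	return 0;	/* stub: would write the PHY register here */
}

static int sketch_mii_bus_init(struct device *dev, struct sketch_priv *priv)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(dev);		/* freed automatically on unbind */
	if (!bus)
		return -ENOMEM;

	bus->name = "sketch-mdio";
	bus->read = sketch_mdio_read;
	bus->write = sketch_mdio_write;
	bus->parent = dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));

	priv->mii_bus = bus;
	return mdiobus_register(bus);		/* caller handles failure */
}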
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 0be13a90ff79..22a874eee2e8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1051,7 +1051,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
break;
}
- /* fall through */
+ fallthrough;
default:
/*
* register RXMTRL must be set in order to do V1 packets,
@@ -1242,7 +1242,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
cc.mult = 3;
cc.shift = 2;
}
- /* fallthrough */
+ fallthrough;
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
cc.read = ixgbe_ptp_read_X550;
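The various /* fall through */ comment spellings in these files are converted to the fallthrough pseudo-keyword. It is declared in include/linux/compiler_attributes.h and expands to __attribute__((__fallthrough__)) on compilers that implement the attribute (and to an empty statement otherwise), so -Wimplicit-fallthrough can verify the annotation instead of pattern-matching comments. A minimal sketch of the idiom, assuming nothing beyond standard kernel headers:

#include <linux/compiler_attributes.h>

/* Deliberate fall-through: the annotation is a statement, so it sits where
 * the old comment did, immediately before the next case label. */
static int sketch_classify(int type)
{
	int weight = 0;

	switch (type) {
	case 2:
		weight += 10;
		fallthrough;	/* type 2 also accumulates type 1's weight */
	case 1:
		weight += 1;
		break;
	default:
		break;
	}
	return weight;
}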
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d05a5690e66b..988db46bff0e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -503,7 +503,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
*/
if (pf_max_frame > ETH_FRAME_LEN)
break;
- /* fall through */
+ fallthrough;
default:
/* If the PF or VF are running w/ jumbo frames enabled
* we need to shut down the VF Rx path as we cannot
@@ -783,7 +783,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr,
ETH_ALEN);
else
- memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN);
+ eth_zero_addr(adapter->vfinfo[vf].vf_mac_addresses);
return retval;
}
@@ -1141,7 +1141,7 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
/* promisc introduced in 1.3 version */
if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
return -EOPNOTSUPP;
- /* Fall through */
+ fallthrough;
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
break;
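The ixgbe_sriov.c hunk also swaps an open-coded memset() of a VF MAC address for eth_zero_addr(), the etherdevice.h helper intended for exactly this (it clears ETH_ALEN bytes). A trivial sketch of its use, with an illustrative wrapper name:

#include <linux/etherdevice.h>

/* eth_zero_addr(addr) is a thin inline over memset(addr, 0x00, ETH_ALEN);
 * using it keeps MAC-address handling greppable and self-documenting. */
static void sketch_forget_vf_mac(u8 mac[ETH_ALEN])
{
	eth_zero_addr(mac);
}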
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 9c42f741ed5e..5e339afa682a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -306,7 +306,7 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
ixgbe_check_cs4227(hw);
- /* Fallthrough */
+ fallthrough;
case IXGBE_DEV_ID_X550EM_A_SFP_N:
return ixgbe_identify_module_generic(hw);
case IXGBE_DEV_ID_X550EM_X_KX4:
@@ -325,7 +325,7 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
else
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
- /* Fallthrough */
+ fallthrough;
case IXGBE_DEV_ID_X550EM_X_10G_T:
return ixgbe_identify_phy_generic(hw);
case IXGBE_DEV_ID_X550EM_X_1G_T:
@@ -2303,7 +2303,7 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
break;
}
}
- /* fall through */
+ fallthrough;
default:
*speed = IXGBE_LINK_SPEED_10GB_FULL |
IXGBE_LINK_SPEED_1GB_FULL;
@@ -2885,7 +2885,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
* through to the fc_full statement. Later, we will
* disable the adapter's ability to send PAUSE frames.
*/
- /* Fallthrough */
+ fallthrough;
case ixgbe_fc_full:
pause = true;
asm_dir = true;
@@ -3284,7 +3284,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_SGMII:
case IXGBE_DEV_ID_X550EM_A_SGMII_L:
hw->phy.type = ixgbe_phy_sgmii;
- /* Fallthrough */
+ fallthrough;
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_KX4:
case IXGBE_DEV_ID_X550EM_X_XFI:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index be9d2a8da515..ec7121f352e2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -120,10 +120,10 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fallthrough */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fallthrough -- handle aborts by dropping packet */
+ fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
result = IXGBE_XDP_CONSUMED;
break;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 988fa49fa99a..e49fb1cd9a99 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -218,8 +218,6 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, ixgbevf_driver_version,
- sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index ecab686574b6..a0e325774819 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -440,7 +440,6 @@ extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
/* needed by ethtool.c */
extern const char ixgbevf_driver_name[];
-extern const char ixgbevf_driver_version[];
int ixgbevf_open(struct net_device *netdev);
int ixgbevf_close(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a39e2cb384dd..a428113e6d54 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -38,8 +38,6 @@ const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "4.1.0-k"
-const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2018 Intel Corporation.";
@@ -81,7 +79,6 @@ MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
@@ -1082,10 +1079,10 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fallthrough */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fallthrough -- handle aborts by dropping packet */
+ fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
result = IXGBEVF_XDP_CONSUMED;
break;
@@ -2605,7 +2602,7 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
* important, starting with the "most" number of features turned on at once,
* and ending with the smallest set of features. This way large combinations
* can be allocated if they're turned on, and smaller combinations are the
- * fallthrough conditions.
+ * fall through conditions.
*
**/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
@@ -3877,7 +3874,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
switch (skb->csum_offset) {
case offsetof(struct tcphdr, check):
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- /* fall through */
+ fallthrough;
case offsetof(struct udphdr, check):
break;
case offsetof(struct sctphdr, checksum):
@@ -3889,7 +3886,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
}
- /* fall through */
+ fallthrough;
default:
skb_checksum_help(skb);
goto no_csum;
@@ -4300,13 +4297,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev_d);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
rtnl_lock();
netif_device_detach(netdev);
@@ -4317,37 +4311,16 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
ixgbevf_clear_interrupt_scheme(adapter);
rtnl_unlock();
-#ifdef CONFIG_PM
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
-#endif
- if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
- pci_disable_device(pdev);
-
return 0;
}
-#ifdef CONFIG_PM
-static int ixgbevf_resume(struct pci_dev *pdev)
+static int __maybe_unused ixgbevf_resume(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
u32 err;
- pci_restore_state(pdev);
- /* pci_restore_state clears dev->state_saved so call
- * pci_save_state to restore it.
- */
- pci_save_state(pdev);
-
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
- return err;
- }
-
adapter->hw.hw_addr = adapter->io_addr;
smp_mb__before_atomic();
clear_bit(__IXGBEVF_DISABLED, &adapter->state);
@@ -4368,10 +4341,9 @@ static int ixgbevf_resume(struct pci_dev *pdev)
return err;
}
-#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
- ixgbevf_suspend(pdev, PMSG_SUSPEND);
+ ixgbevf_suspend(&pdev->dev);
}
static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
@@ -4505,15 +4477,9 @@ static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
- struct ixgbevf_adapter *adapter = netdev_priv(dev);
-
switch (xdp->command) {
case XDP_SETUP_PROG:
return ixgbevf_xdp_setup(dev, xdp->prog);
- case XDP_QUERY_PROG:
- xdp->prog_id = adapter->xdp_prog ?
- adapter->xdp_prog->aux->id : 0;
- return 0;
default:
return -EINVAL;
}
@@ -4891,16 +4857,17 @@ static const struct pci_error_handlers ixgbevf_err_handler = {
.resume = ixgbevf_io_resume,
};
+static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
+
static struct pci_driver ixgbevf_driver = {
.name = ixgbevf_driver_name,
.id_table = ixgbevf_pci_tbl,
.probe = ixgbevf_probe,
.remove = ixgbevf_remove,
-#ifdef CONFIG_PM
+
/* Power Management Hooks */
- .suspend = ixgbevf_suspend,
- .resume = ixgbevf_resume,
-#endif
+ .driver.pm = &ixgbevf_pm_ops,
+
.shutdown = ixgbevf_shutdown,
.err_handler = &ixgbevf_err_handler
};
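Both ixgbe and ixgbevf make the same power-management conversion: the legacy pci_driver .suspend/.resume hooks and their CONFIG_PM ifdefs give way to a dev_pm_ops built with SIMPLE_DEV_PM_OPS and attached via .driver.pm. Under the generic framework the PCI core saves and restores config space and handles the D-state transitions, so pci_save_state(), pci_restore_state(), pci_enable_device_mem() and pci_set_power_state() drop out of the driver callbacks, and __maybe_unused keeps the build quiet when CONFIG_PM_SLEEP is disabled. A stripped-down sketch of the pattern, with illustrative "sketch_" names rather than the drivers' own:

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused sketch_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	netif_device_detach(netdev);
	/* no pci_save_state()/pci_set_power_state(): the PCI core does that */
	return 0;
}

static int __maybe_unused sketch_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	pci_set_master(pdev);
	netif_device_attach(netdev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(sketch_pm_ops, sketch_suspend, sketch_resume);

static struct pci_driver sketch_pci_driver = {
	.name		= "sketch",
	.driver.pm	= &sketch_pm_ops,
	/* .id_table, .probe and .remove omitted from this sketch */
};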
@@ -4913,9 +4880,7 @@ static struct pci_driver ixgbevf_driver = {
**/
static int __init ixgbevf_init_module(void)
{
- pr_info("%s - version %s\n", ixgbevf_driver_string,
- ixgbevf_driver_version);
-
+ pr_info("%s\n", ixgbevf_driver_string);
pr_info("%s\n", ixgbevf_copyright);
ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
if (!ixgbevf_wq) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index d5ce49636548..bfe6dfcec4ab 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -314,7 +314,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
case ixgbe_mbox_api_12:
if (hw->mac.type < ixgbe_mac_X550_vf)
break;
- /* fall through */
+ fallthrough;
default:
return -EOPNOTSUPP;
}
@@ -382,7 +382,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
case ixgbe_mbox_api_12:
if (hw->mac.type < ixgbe_mac_X550_vf)
break;
- /* fall through */
+ fallthrough;
default:
return -EOPNOTSUPP;
}
@@ -540,7 +540,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
/* promisc introduced in 1.3 version */
if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
return -EOPNOTSUPP;
- /* Fall threw */
+ fallthrough;
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
break;