-rw-r--r-- drivers/net/Kconfig | 1
-rw-r--r-- drivers/net/bonding/bond_main.c | 19
-rw-r--r-- drivers/net/bonding/bond_options.c | 8
-rw-r--r-- drivers/net/ethernet/3com/typhoon.c | 53
-rw-r--r-- drivers/net/ethernet/8390/ne2k-pci.c | 29
-rw-r--r-- drivers/net/ethernet/adaptec/starfire.c | 23
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 22
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 31
-rw-r--r-- drivers/net/ethernet/dlink/sundance.c | 27
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 11
-rw-r--r-- drivers/net/ethernet/micrel/ksz884x.c | 25
-rw-r--r-- drivers/net/ethernet/natsemi/natsemi.c | 26
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-main.c | 14
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 2
-rw-r--r-- drivers/net/ipa/gsi.c | 111
-rw-r--r-- drivers/net/ipa/ipa_endpoint.c | 31
-rw-r--r-- drivers/net/ipa/ipa_main.c | 5
-rw-r--r-- drivers/net/ipa/ipa_reg.h | 60
-rw-r--r-- drivers/net/ipa/ipa_uc.c | 10
-rw-r--r-- drivers/net/xen-netback/common.h | 4
-rw-r--r-- drivers/net/xen-netback/interface.c | 2
-rw-r--r-- drivers/net/xen-netback/netback.c | 7
-rw-r--r-- drivers/net/xen-netback/rx.c | 15
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 34
-rw-r--r-- drivers/net/xen-netfront.c | 336
-rw-r--r-- include/net/bonding.h | 5
-rw-r--r-- include/xen/interface/io/netif.h | 20
-rw-r--r-- net/packet/af_packet.c | 2
29 files changed, 651 insertions(+), 304 deletions(-)
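
Most of the per-driver hunks below apply the same mechanical conversion: the legacy PCI power-management hooks (the .suspend/.resume members of struct pci_driver, which take a pci_dev and a pm_message_t) are replaced by generic dev_pm_ops. With dev_pm_ops the PCI core performs pci_save_state(), pci_restore_state(), pci_set_power_state() and pci_enable_device() itself, which is why those calls vanish from the drivers, and wake-up is requested with device_set_wakeup_enable()/device_wakeup_enable() instead of pci_enable_wake(). A minimal sketch of the resulting shape, using a hypothetical "mydrv" driver rather than any specific driver in this series:

/* Sketch only: "mydrv" is a made-up driver name used for illustration. */
static int __maybe_unused mydrv_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);

	netif_device_detach(dev);	/* device-specific work only */
	return 0;
}

static int __maybe_unused mydrv_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);

	netif_device_attach(dev);
	return 0;
}

/* SIMPLE_DEV_PM_OPS() plus __maybe_unused replaces the old #ifdef CONFIG_PM
 * guards: with CONFIG_PM disabled the ops structure is empty and the unused
 * callbacks are discarded without compiler warnings.
 */
static SIMPLE_DEV_PM_OPS(mydrv_pm_ops, mydrv_suspend, mydrv_resume);

static struct pci_driver mydrv_driver = {
	.name		= "mydrv",
	.driver.pm	= &mydrv_pm_ops,
	/* .probe, .remove and .id_table are unchanged by the conversion */
};
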
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 7dcb465824be..1368d1d6a114 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -495,6 +495,7 @@ config XEN_NETDEV_FRONTEND
tristate "Xen network device frontend driver"
depends on XEN
select XEN_XENBUS_FRONTEND
+ select PAGE_POOL
default y
help
This driver provides support for Xen paravirtual network
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b3479584cc16..2adf6ce20a38 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -434,6 +434,9 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
struct net_device *slave_dev = curr_active->dev;
+ if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
+ return true;
+
if (!(slave_dev->xfrmdev_ops
&& slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) {
slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__);
@@ -1218,11 +1221,6 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
-#ifdef CONFIG_XFRM_OFFLOAD
-#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
- NETIF_F_GSO_ESP)
-#endif /* CONFIG_XFRM_OFFLOAD */
-
#define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_ALL_TSO)
@@ -4654,8 +4652,7 @@ void bond_setup(struct net_device *bond_dev)
#ifdef CONFIG_XFRM_OFFLOAD
/* set up xfrm device ops (only supported in active-backup right now) */
- if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
- bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
+ bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
bond->xs = NULL;
#endif /* CONFIG_XFRM_OFFLOAD */
@@ -4678,11 +4675,15 @@ void bond_setup(struct net_device *bond_dev)
bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
#ifdef CONFIG_XFRM_OFFLOAD
- if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
- bond_dev->hw_features |= BOND_XFRM_FEATURES;
+ bond_dev->hw_features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
bond_dev->features |= bond_dev->hw_features;
bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+#ifdef CONFIG_XFRM_OFFLOAD
+ /* Disable XFRM features if this isn't an active-backup config */
+ if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
+ bond_dev->features &= ~BOND_XFRM_FEATURES;
+#endif /* CONFIG_XFRM_OFFLOAD */
}
/* Destroy a bonding device.
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index ddb3916d3506..9abfaae1c6f7 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -767,6 +767,14 @@ static int bond_option_mode_set(struct bonding *bond,
if (newval->value == BOND_MODE_ALB)
bond->params.tlb_dynamic_lb = 1;
+#ifdef CONFIG_XFRM_OFFLOAD
+ if (newval->value == BOND_MODE_ACTIVEBACKUP)
+ bond->dev->wanted_features |= BOND_XFRM_FEATURES;
+ else
+ bond->dev->wanted_features &= ~BOND_XFRM_FEATURES;
+ netdev_change_features(bond->dev);
+#endif /* CONFIG_XFRM_OFFLOAD */
+
/* don't cache arp_validate between modes */
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
bond->params.mode = newval->value;
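
Note that bond_option_mode_set() can only reference BOND_XFRM_FEATURES because the macro is no longer private to bond_main.c: the bond_main.c hunk above deletes the local definition, and the include/net/bonding.h change listed in the diffstat (its hunk is not shown in this capture) presumably re-creates it there, unconditionally, along these lines:

/* Assumed new home in include/net/bonding.h; the mask matches the one
 * removed from bond_main.c above.
 */
#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
			    NETIF_F_GSO_ESP)
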
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 5ed33c2c4742..d3b30bacc94e 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -1801,9 +1801,8 @@ typhoon_free_rx_rings(struct typhoon *tp)
}
static int
-typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
+typhoon_sleep_early(struct typhoon *tp, __le16 events)
{
- struct pci_dev *pdev = tp->pdev;
void __iomem *ioaddr = tp->ioaddr;
struct cmd_desc xp_cmd;
int err;
@@ -1832,20 +1831,29 @@ typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
*/
netif_carrier_off(tp->dev);
+ return 0;
+}
+
+static int
+typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
+{
+ int err;
+
+ err = typhoon_sleep_early(tp, events);
+
+ if (err)
+ return err;
+
pci_enable_wake(tp->pdev, state, 1);
- pci_disable_device(pdev);
- return pci_set_power_state(pdev, state);
+ pci_disable_device(tp->pdev);
+ return pci_set_power_state(tp->pdev, state);
}
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
- struct pci_dev *pdev = tp->pdev;
void __iomem *ioaddr = tp->ioaddr;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
/* Post 2.x.x versions of the Sleep Image require a reset before
* we can download the Runtime Image. But let's not make users of
* the old firmware pay for the reset.
@@ -2049,6 +2057,9 @@ typhoon_open(struct net_device *dev)
if (err)
goto out;
+ pci_set_power_state(tp->pdev, PCI_D0);
+ pci_restore_state(tp->pdev);
+
err = typhoon_wakeup(tp, WaitSleep);
if (err < 0) {
netdev_err(dev, "unable to wakeup device\n");
@@ -2114,11 +2125,10 @@ typhoon_close(struct net_device *dev)
return 0;
}
-#ifdef CONFIG_PM
-static int
-typhoon_resume(struct pci_dev *pdev)
+static int __maybe_unused
+typhoon_resume(struct device *dev_d)
{
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
struct typhoon *tp = netdev_priv(dev);
/* If we're down, resume when we are upped.
@@ -2144,9 +2154,10 @@ reset:
return -EBUSY;
}
-static int
-typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused
+typhoon_suspend(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *dev = pci_get_drvdata(pdev);
struct typhoon *tp = netdev_priv(dev);
struct cmd_desc xp_cmd;
@@ -2190,18 +2201,19 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
goto need_resume;
}
- if (typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
+ if (typhoon_sleep_early(tp, tp->wol_events) < 0) {
netdev_err(dev, "unable to put card to sleep\n");
goto need_resume;
}
+ device_wakeup_enable(dev_d);
+
return 0;
need_resume:
- typhoon_resume(pdev);
+ typhoon_resume(dev_d);
return -EBUSY;
}
-#endif
static int
typhoon_test_mmio(struct pci_dev *pdev)
@@ -2533,15 +2545,14 @@ typhoon_remove_one(struct pci_dev *pdev)
free_netdev(dev);
}
+static SIMPLE_DEV_PM_OPS(typhoon_pm_ops, typhoon_suspend, typhoon_resume);
+
static struct pci_driver typhoon_driver = {
.name = KBUILD_MODNAME,
.id_table = typhoon_pci_tbl,
.probe = typhoon_init_one,
.remove = typhoon_remove_one,
-#ifdef CONFIG_PM
- .suspend = typhoon_suspend,
- .resume = typhoon_resume,
-#endif
+ .driver.pm = &typhoon_pm_ops,
};
static int __init
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 77d78b4c59c4..e500f0c05725 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -699,30 +699,18 @@ static void ne2k_pci_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-#ifdef CONFIG_PM
-static int ne2k_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused ne2k_pci_suspend(struct device *dev_d)
{
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
netif_device_detach(dev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
-static int ne2k_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused ne2k_pci_resume(struct device *dev_d)
{
- struct net_device *dev = pci_get_drvdata(pdev);
- int rc;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
+ struct net_device *dev = dev_get_drvdata(dev_d);
NS8390_init(dev, 1);
netif_device_attach(dev);
@@ -730,19 +718,14 @@ static int ne2k_pci_resume(struct pci_dev *pdev)
return 0;
}
-#endif /* CONFIG_PM */
-
+static SIMPLE_DEV_PM_OPS(ne2k_pci_pm_ops, ne2k_pci_suspend, ne2k_pci_resume);
static struct pci_driver ne2k_driver = {
.name = DRV_NAME,
.probe = ne2k_pci_init_one,
.remove = ne2k_pci_remove_one,
.id_table = ne2k_pci_tbl,
-#ifdef CONFIG_PM
- .suspend = ne2k_pci_suspend,
- .resume = ne2k_pci_resume,
-#endif
-
+ .driver.pm = &ne2k_pci_pm_ops,
};
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index a64191fc2af9..ba0055bb1614 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1984,28 +1984,21 @@ static int netdev_close(struct net_device *dev)
return 0;
}
-#ifdef CONFIG_PM
-static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused starfire_suspend(struct device *dev_d)
{
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
if (netif_running(dev)) {
netif_device_detach(dev);
netdev_close(dev);
}
- pci_save_state(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev,state));
-
return 0;
}
-static int starfire_resume(struct pci_dev *pdev)
+static int __maybe_unused starfire_resume(struct device *dev_d)
{
- struct net_device *dev = pci_get_drvdata(pdev);
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
if (netif_running(dev)) {
netdev_open(dev);
@@ -2014,8 +2007,6 @@ static int starfire_resume(struct pci_dev *pdev)
return 0;
}
-#endif /* CONFIG_PM */
-
static void starfire_remove_one(struct pci_dev *pdev)
{
@@ -2040,15 +2031,13 @@ static void starfire_remove_one(struct pci_dev *pdev)
free_netdev(dev); /* Will also free np!! */
}
+static SIMPLE_DEV_PM_OPS(starfire_pm_ops, starfire_suspend, starfire_resume);
static struct pci_driver starfire_driver = {
.name = DRV_NAME,
.probe = starfire_init_one,
.remove = starfire_remove_one,
-#ifdef CONFIG_PM
- .suspend = starfire_suspend,
- .resume = starfire_resume,
-#endif /* CONFIG_PM */
+ .driver.pm = &starfire_pm_ops,
.id_table = starfire_pci_tbl,
};
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index dda4b8fc9525..91be3ffa1c5c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -4420,13 +4420,12 @@ static void ena_shutdown(struct pci_dev *pdev)
__ena_shutoff(pdev, true);
}
-#ifdef CONFIG_PM
/* ena_suspend - PM suspend callback
- * @pdev: PCI device information struct
- * @state:power state
+ * @dev_d: Device information struct
*/
-static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused ena_suspend(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct ena_adapter *adapter = pci_get_drvdata(pdev);
u64_stats_update_begin(&adapter->syncp);
@@ -4445,12 +4444,11 @@ static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
}
/* ena_resume - PM resume callback
- * @pdev: PCI device information struct
- *
+ * @dev_d: Device information struct
*/
-static int ena_resume(struct pci_dev *pdev)
+static int __maybe_unused ena_resume(struct device *dev_d)
{
- struct ena_adapter *adapter = pci_get_drvdata(pdev);
+ struct ena_adapter *adapter = dev_get_drvdata(dev_d);
int rc;
u64_stats_update_begin(&adapter->syncp);
@@ -4462,7 +4460,8 @@ static int ena_resume(struct pci_dev *pdev)
rtnl_unlock();
return rc;
}
-#endif
+
+static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume);
static struct pci_driver ena_pci_driver = {
.name = DRV_MODULE_NAME,
@@ -4470,10 +4469,7 @@ static struct pci_driver ena_pci_driver = {
.probe = ena_probe,
.remove = ena_remove,
.shutdown = ena_shutdown,
-#ifdef CONFIG_PM
- .suspend = ena_suspend,
- .resume = ena_resume,
-#endif
+ .driver.pm = &ena_pm_ops,
.sriov_configure = pci_sriov_configure_simple,
};
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 66d31c018c7e..19689d72bc4e 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -405,27 +405,8 @@ static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
/* Nothing to be done here. */
}
-#ifdef CONFIG_PM
-/**
- * \brief called when suspending
- * @param pdev Pointer to PCI device
- * @param state state to suspend to
- */
-static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
- pm_message_t state __attribute__((unused)))
-{
- return 0;
-}
-
-/**
- * \brief called when resuming
- * @param pdev Pointer to PCI device
- */
-static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
-{
- return 0;
-}
-#endif
+#define liquidio_suspend NULL
+#define liquidio_resume NULL
/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
@@ -451,17 +432,15 @@ static const struct pci_device_id liquidio_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
+static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume);
+
static struct pci_driver liquidio_pci_driver = {
.name = "LiquidIO",
.id_table = liquidio_pci_tbl,
.probe = liquidio_probe,
.remove = liquidio_remove,
.err_handler = &liquidio_err_handler, /* For AER */
-
-#ifdef CONFIG_PM
- .suspend = liquidio_suspend,
- .resume = liquidio_resume,
-#endif
+ .driver.pm = &liquidio_pm_ops,
#ifdef CONFIG_PCI_IOV
.sriov_configure = liquidio_enable_sriov,
#endif
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index dc566fcc3ba9..ca97e321082d 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1928,11 +1928,9 @@ static void sundance_remove1(struct pci_dev *pdev)
}
}
-#ifdef CONFIG_PM
-
-static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
+static int __maybe_unused sundance_suspend(struct device *dev_d)
{
- struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base;
@@ -1942,30 +1940,24 @@ static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
netdev_close(dev);
netif_device_detach(dev);
- pci_save_state(pci_dev);
if (np->wol_enabled) {
iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
iowrite16(RxEnable, ioaddr + MACCtrl1);
}
- pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
- np->wol_enabled);
- pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+
+ device_set_wakeup_enable(dev_d, np->wol_enabled);
return 0;
}
-static int sundance_resume(struct pci_dev *pci_dev)
+static int __maybe_unused sundance_resume(struct device *dev_d)
{
- struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
int err = 0;
if (!netif_running(dev))
return 0;
- pci_set_power_state(pci_dev, PCI_D0);
- pci_restore_state(pci_dev);
- pci_enable_wake(pci_dev, PCI_D0, 0);
-
err = netdev_open(dev);
if (err) {
printk(KERN_ERR "%s: Can't resume interface!\n",
@@ -1979,17 +1971,14 @@ out:
return err;
}
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(sundance_pm_ops, sundance_suspend, sundance_resume);
static struct pci_driver sundance_driver = {
.name = DRV_NAME,
.id_table = sundance_pci_tbl,
.probe = sundance_probe1,
.remove = sundance_remove1,
-#ifdef CONFIG_PM
- .suspend = sundance_suspend,
- .resume = sundance_resume,
-#endif /* CONFIG_PM */
+ .driver.pm = &sundance_pm_ops,
};
static int __init sundance_init(void)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a7ac23a6862b..e26f59336cfd 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -6037,32 +6037,23 @@ do_none:
return status;
}
-static int be_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused be_suspend(struct device *dev_d)
{
- struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct be_adapter *adapter = dev_get_drvdata(dev_d);
be_intr_set(adapter, false);
be_cancel_err_detection(adapter);
be_cleanup(adapter);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
-static int be_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused be_pci_resume(struct device *dev_d)
{
- struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct be_adapter *adapter = dev_get_drvdata(dev_d);
int status = 0;
- status = pci_enable_device(pdev);
- if (status)
- return status;
-
- pci_restore_state(pdev);
-
status = be_resume(adapter);
if (status)
return status;
@@ -6234,13 +6225,14 @@ static const struct pci_error_handlers be_eeh_handlers = {
.resume = be_eeh_resume,
};
+static SIMPLE_DEV_PM_OPS(be_pci_pm_ops, be_suspend, be_pci_resume);
+
static struct pci_driver be_driver = {
.name = DRV_NAME,
.id_table = be_dev_ids,
.probe = be_probe,
.remove = be_remove,
- .suspend = be_suspend,
- .resume = be_pci_resume,
+ .driver.pm = &be_pci_pm_ops,
.shutdown = be_shutdown,
.sriov_configure = be_pci_sriov_configure,
.err_handler = &be_eeh_handlers
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 3d9aa7da95e9..4cae7db8d49c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4370,8 +4370,9 @@ static const struct pci_error_handlers mlx4_err_handler = {
.resume = mlx4_pci_resume,
};
-static int mlx4_suspend(struct pci_dev *pdev, pm_message_t state)
+static int mlx4_suspend(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
@@ -4384,8 +4385,9 @@ static int mlx4_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int mlx4_resume(struct pci_dev *pdev)
+static int mlx4_resume(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -4414,14 +4416,15 @@ static int mlx4_resume(struct pci_dev *pdev)
return ret;
}
+static SIMPLE_DEV_PM_OPS(mlx4_pm_ops, mlx4_suspend, mlx4_resume);
+
static struct pci_driver mlx4_driver = {
.name = DRV_NAME,
.id_table = mlx4_pci_table,
.probe = mlx4_init_one,
.shutdown = mlx4_shutdown,
.remove = mlx4_remove_one,
- .suspend = mlx4_suspend,
- .resume = mlx4_resume,
+ .driver.pm = &mlx4_pm_ops,
.err_handler = &mlx4_err_handler,
};
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 4fe6aedca22f..24901342ecc0 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -7155,17 +7155,14 @@ static void pcidev_exit(struct pci_dev *pdev)
kfree(info);
}
-#ifdef CONFIG_PM
-static int pcidev_resume(struct pci_dev *pdev)
+static int __maybe_unused pcidev_resume(struct device *dev_d)
{
int i;
- struct platform_info *info = pci_get_drvdata(pdev);
+ struct platform_info *info = dev_get_drvdata(dev_d);
struct dev_info *hw_priv = &info->dev_info;
struct ksz_hw *hw = &hw_priv->hw;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_enable_wake(pdev, PCI_D0, 0);
+ device_wakeup_disable(dev_d);
if (hw_priv->wol_enable)
hw_cfg_wol_pme(hw, 0);
@@ -7182,10 +7179,10 @@ static int pcidev_resume(struct pci_dev *pdev)
return 0;
}
-static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
+static int pcidev_suspend(struct device *dev_d)
{
int i;
- struct platform_info *info = pci_get_drvdata(pdev);
+ struct platform_info *info = dev_get_drvdata(dev_d);
struct dev_info *hw_priv = &info->dev_info;
struct ksz_hw *hw = &hw_priv->hw;
@@ -7207,12 +7204,9 @@ static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
hw_cfg_wol_pme(hw, 1);
}
- pci_save_state(pdev);
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ device_wakeup_enable(dev_d);
return 0;
}
-#endif
static char pcidev_name[] = "ksz884xp";
@@ -7226,11 +7220,10 @@ static const struct pci_device_id pcidev_table[] = {
MODULE_DEVICE_TABLE(pci, pcidev_table);
+static SIMPLE_DEV_PM_OPS(pcidev_pm_ops, pcidev_suspend, pcidev_resume);
+
static struct pci_driver pci_device_driver = {
-#ifdef CONFIG_PM
- .suspend = pcidev_suspend,
- .resume = pcidev_resume,
-#endif
+ .driver.pm = &pcidev_pm_ops,
.name = pcidev_name,
.id_table = pcidev_table,
.probe = pcidev_init,
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index d21d706b83a7..c2867fe995bc 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -3247,8 +3247,6 @@ static void natsemi_remove1(struct pci_dev *pdev)
free_netdev (dev);
}
-#ifdef CONFIG_PM
-
/*
* The ns83815 chip doesn't have explicit RxStop bits.
* Kicking the Rx or Tx process for a new packet reenables the Rx process
@@ -3275,9 +3273,9 @@ static void natsemi_remove1(struct pci_dev *pdev)
* Interrupts must be disabled, otherwise hands_off can cause irq storms.
*/
-static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused natsemi_suspend(struct device *dev_d)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
struct netdev_private *np = netdev_priv(dev);
void __iomem * ioaddr = ns_ioaddr(dev);
@@ -3326,11 +3324,10 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
}
-static int natsemi_resume (struct pci_dev *pdev)
+static int __maybe_unused natsemi_resume(struct device *dev_d)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
struct netdev_private *np = netdev_priv(dev);
- int ret = 0;
rtnl_lock();
if (netif_device_present(dev))
@@ -3339,12 +3336,6 @@ static int natsemi_resume (struct pci_dev *pdev)
const int irq = np->pci_dev->irq;
BUG_ON(!np->hands_off);
- ret = pci_enable_device(pdev);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "pci_enable_device() failed: %d\n", ret);
- goto out;
- }
/* pci_power_on(pdev); */
napi_enable(&np->napi);
@@ -3364,20 +3355,17 @@ static int natsemi_resume (struct pci_dev *pdev)
netif_device_attach(dev);
out:
rtnl_unlock();
- return ret;
+ return 0;
}
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(natsemi_pm_ops, natsemi_suspend, natsemi_resume);
static struct pci_driver natsemi_driver = {
.name = DRV_NAME,
.id_table = natsemi_pci_tbl,
.probe = natsemi_probe1,
.remove = natsemi_remove1,
-#ifdef CONFIG_PM
- .suspend = natsemi_suspend,
- .resume = natsemi_resume,
-#endif
+ .driver.pm = &natsemi_pm_ops,
};
static int __init natsemi_init_mod (void)
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 9b63574b6202..5de85b9e9e35 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3999,12 +3999,11 @@ static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
}
}
-#ifdef CONFIG_PM
/**
* vxge_pm_suspend - vxge power management suspend entry point
*
*/
-static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused vxge_pm_suspend(struct device *dev_d)
{
return -ENOSYS;
}
@@ -4012,13 +4011,11 @@ static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
* vxge_pm_resume - vxge power management resume entry point
*
*/
-static int vxge_pm_resume(struct pci_dev *pdev)
+static int __maybe_unused vxge_pm_resume(struct device *dev_d)
{
return -ENOSYS;
}
-#endif
-
/**
* vxge_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -4796,15 +4793,14 @@ static const struct pci_error_handlers vxge_err_handler = {
.resume = vxge_io_resume,
};
+static SIMPLE_DEV_PM_OPS(vxge_pm_ops, vxge_pm_suspend, vxge_pm_resume);
+
static struct pci_driver vxge_driver = {
.name = VXGE_DRIVER_NAME,
.id_table = vxge_id_table,
.probe = vxge_probe,
.remove = vxge_remove,
-#ifdef CONFIG_PM
- .suspend = vxge_pm_suspend,
- .resume = vxge_pm_resume,
-#endif
+ .driver.pm = &vxge_pm_ops,
.err_handler = &vxge_err_handler,
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 0cd6b8bf023a..98527e42f918 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -2451,7 +2451,7 @@ void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
ops->schedule_recovery_handler(cookie);
}
-char *qed_hw_err_type_descr[] = {
+static char *qed_hw_err_type_descr[] = {
[QED_HW_ERR_FAN_FAIL] = "Fan Failure",
[QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure",
[QED_HW_ERR_HW_ATTN] = "HW Attention",
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 55226b264e3c..c84b8d92caa9 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -336,6 +336,7 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct completion *completion = &evt_ring->completion;
+ struct device *dev = gsi->dev;
u32 val;
val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
@@ -344,8 +345,8 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
return 0; /* Success! */
- dev_err(gsi->dev, "GSI command %u to event ring %u timed out "
- "(state is %u)\n", opcode, evt_ring_id, evt_ring->state);
+ dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
+ opcode, evt_ring_id, evt_ring->state);
return -ETIMEDOUT;
}
@@ -358,13 +359,15 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
/* Get initial event ring state */
evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
-
- if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
+ dev_err(gsi->dev, "bad event ring state %u before alloc\n",
+ evt_ring->state);
return -EINVAL;
+ }
ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
- dev_err(gsi->dev, "bad event ring state (%u) after alloc\n",
+ dev_err(gsi->dev, "bad event ring state %u after alloc\n",
evt_ring->state);
ret = -EIO;
}
@@ -381,14 +384,14 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
if (state != GSI_EVT_RING_STATE_ALLOCATED &&
state != GSI_EVT_RING_STATE_ERROR) {
- dev_err(gsi->dev, "bad event ring state (%u) before reset\n",
+ dev_err(gsi->dev, "bad event ring state %u before reset\n",
evt_ring->state);
return;
}
ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
- dev_err(gsi->dev, "bad event ring state (%u) after reset\n",
+ dev_err(gsi->dev, "bad event ring state %u after reset\n",
evt_ring->state);
}
@@ -399,14 +402,14 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
int ret;
if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
- dev_err(gsi->dev, "bad event ring state (%u) before dealloc\n",
+ dev_err(gsi->dev, "bad event ring state %u before dealloc\n",
evt_ring->state);
return;
}
ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
- dev_err(gsi->dev, "bad event ring state (%u) after dealloc\n",
+ dev_err(gsi->dev, "bad event ring state %u after dealloc\n",
evt_ring->state);
}
@@ -429,6 +432,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
struct completion *completion = &channel->completion;
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
+ struct device *dev = gsi->dev;
u32 val;
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
@@ -437,8 +441,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
return 0; /* Success! */
- dev_err(gsi->dev,
- "GSI command %u to channel %u timed out (state is %u)\n",
+ dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
opcode, channel_id, gsi_channel_state(channel));
return -ETIMEDOUT;
@@ -448,21 +451,23 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct device *dev = gsi->dev;
enum gsi_channel_state state;
int ret;
/* Get initial channel state */
state = gsi_channel_state(channel);
- if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+ if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
+ dev_err(dev, "bad channel state %u before alloc\n", state);
return -EINVAL;
+ }
ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
/* Channel state will normally have been updated */
state = gsi_channel_state(channel);
if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
- dev_err(gsi->dev, "bad channel state (%u) after alloc\n",
- state);
+ dev_err(dev, "bad channel state %u after alloc\n", state);
ret = -EIO;
}
@@ -472,21 +477,23 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
+ struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED &&
- state != GSI_CHANNEL_STATE_STOPPED)
+ state != GSI_CHANNEL_STATE_STOPPED) {
+ dev_err(dev, "bad channel state %u before start\n", state);
return -EINVAL;
+ }
ret = gsi_channel_command(channel, GSI_CH_START);
/* Channel state will normally have been updated */
state = gsi_channel_state(channel);
if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
- dev_err(channel->gsi->dev,
- "bad channel state (%u) after start\n", state);
+ dev_err(dev, "bad channel state %u after start\n", state);
ret = -EIO;
}
@@ -496,13 +503,16 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
+ struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_STARTED &&
- state != GSI_CHANNEL_STATE_STOP_IN_PROC)
+ state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
+ dev_err(dev, "bad channel state %u before stop\n", state);
return -EINVAL;
+ }
ret = gsi_channel_command(channel, GSI_CH_STOP);
@@ -515,8 +525,7 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
return -EAGAIN;
- dev_err(channel->gsi->dev,
- "bad channel state (%u) after stop\n", state);
+ dev_err(dev, "bad channel state %u after stop\n", state);
return -EIO;
}
@@ -524,6 +533,7 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
+ struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
int ret;
@@ -532,8 +542,7 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_STOPPED &&
state != GSI_CHANNEL_STATE_ERROR) {
- dev_err(channel->gsi->dev,
- "bad channel state (%u) before reset\n", state);
+ dev_err(dev, "bad channel state %u before reset\n", state);
return;
}
@@ -542,21 +551,20 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
/* Channel state will normally have been updated */
state = gsi_channel_state(channel);
if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
- dev_err(channel->gsi->dev,
- "bad channel state (%u) after reset\n", state);
+ dev_err(dev, "bad channel state %u after reset\n", state);
}
/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct device *dev = gsi->dev;
enum gsi_channel_state state;
int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED) {
- dev_err(gsi->dev,
- "bad channel state (%u) before dealloc\n", state);
+ dev_err(dev, "bad channel state %u before dealloc\n", state);
return;
}
@@ -565,8 +573,7 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
/* Channel state will normally have been updated */
state = gsi_channel_state(channel);
if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
- dev_err(gsi->dev,
- "bad channel state (%u) after dealloc\n", state);
+ dev_err(dev, "bad channel state %u after dealloc\n", state);
}
/* Ring an event ring doorbell, reporting the last entry processed by the AP.
@@ -1148,8 +1155,8 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
break;
default:
dev_err(gsi->dev,
- "%s: unrecognized type 0x%08x\n",
- __func__, gsi_intr);
+ "unrecognized interrupt type 0x%08x\n",
+ gsi_intr);
break;
}
} while (intr_mask);
@@ -1253,7 +1260,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
if (ring->virt && addr % size) {
dma_free_coherent(dev, size, ring->virt, ring->addr);
dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
- size);
+ size);
return -EINVAL; /* Not a good error value, but distinct */
} else if (!ring->virt) {
return -ENOMEM;
@@ -1644,12 +1651,13 @@ static void gsi_channel_teardown(struct gsi *gsi)
/* Setup function for GSI. GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi, bool legacy)
{
+ struct device *dev = gsi->dev;
u32 val;
/* Here is where we first touch the GSI hardware */
val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
if (!(val & ENABLED_FMASK)) {
- dev_err(gsi->dev, "GSI has not been enabled\n");
+ dev_err(dev, "GSI has not been enabled\n");
return -EIO;
}
@@ -1657,24 +1665,24 @@ int gsi_setup(struct gsi *gsi, bool legacy)
gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
if (!gsi->channel_count) {
- dev_err(gsi->dev, "GSI reports zero channels supported\n");
+ dev_err(dev, "GSI reports zero channels supported\n");
return -EINVAL;
}
if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
- dev_warn(gsi->dev,
- "limiting to %u channels (hardware supports %u)\n",
+ dev_warn(dev,
+ "limiting to %u channels; hardware supports %u\n",
GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
}
gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
if (!gsi->evt_ring_count) {
- dev_err(gsi->dev, "GSI reports zero event rings supported\n");
+ dev_err(dev, "GSI reports zero event rings supported\n");
return -EINVAL;
}
if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
- dev_warn(gsi->dev,
- "limiting to %u event rings (hardware supports %u)\n",
+ dev_warn(dev,
+ "limiting to %u event rings; hardware supports %u\n",
GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
}
@@ -1760,19 +1768,19 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
/* Make sure channel ids are in the range driver supports */
if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
- dev_err(dev, "bad channel id %u (must be less than %u)\n",
+ dev_err(dev, "bad channel id %u; must be less than %u\n",
channel_id, GSI_CHANNEL_COUNT_MAX);
return false;
}
if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
- dev_err(dev, "bad EE id %u (AP or modem)\n", data->ee_id);
+ dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
return false;
}
if (!data->channel.tlv_count ||
data->channel.tlv_count > GSI_TLV_MAX) {
- dev_err(dev, "channel %u bad tlv_count %u (must be 1..%u)\n",
+ dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
channel_id, data->channel.tlv_count, GSI_TLV_MAX);
return false;
}
@@ -1790,13 +1798,13 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
}
if (!is_power_of_2(data->channel.tre_count)) {
- dev_err(dev, "channel %u bad tre_count %u (not power of 2)\n",
+ dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
channel_id, data->channel.tre_count);
return false;
}
if (!is_power_of_2(data->channel.event_count)) {
- dev_err(dev, "channel %u bad event_count %u (not power of 2)\n",
+ dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
channel_id, data->channel.event_count);
return false;
}
@@ -1950,6 +1958,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
u32 count, const struct ipa_gsi_endpoint_data *data,
bool modem_alloc)
{
+ struct device *dev = &pdev->dev;
struct resource *res;
resource_size_t size;
unsigned int irq;
@@ -1957,7 +1966,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
gsi_validate_build();
- gsi->dev = &pdev->dev;
+ gsi->dev = dev;
/* The GSI layer performs NAPI on all endpoints. NAPI requires a
* network device structure, but the GSI layer does not have one,
@@ -1968,43 +1977,41 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
/* Get the GSI IRQ and request for it to wake the system */
ret = platform_get_irq_byname(pdev, "gsi");
if (ret <= 0) {
- dev_err(gsi->dev,
- "DT error %d getting \"gsi\" IRQ property\n", ret);
+ dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
return ret ? : -EINVAL;
}
irq = ret;
ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
if (ret) {
- dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
+ dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
return ret;
}
gsi->irq = irq;
ret = enable_irq_wake(gsi->irq);
if (ret)
- dev_warn(gsi->dev, "error %d enabling gsi wake irq\n", ret);
+ dev_warn(dev, "error %d enabling gsi wake irq\n", ret);
gsi->irq_wake_enabled = !ret;
/* Get GSI memory range and map it */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
if (!res) {
- dev_err(gsi->dev,
- "DT error getting \"gsi\" memory property\n");
+ dev_err(dev, "DT error getting \"gsi\" memory property\n");
ret = -ENODEV;
goto err_disable_irq_wake;
}
size = resource_size(res);
if (res->start > U32_MAX || size > U32_MAX - res->start) {
- dev_err(gsi->dev, "DT memory resource \"gsi\" out of range\n");
+ dev_err(dev, "DT memory resource \"gsi\" out of range\n");
ret = -EINVAL;
goto err_disable_irq_wake;
}
gsi->virt = ioremap(res->start, size);
if (!gsi->virt) {
- dev_err(gsi->dev, "unable to remap \"gsi\" memory\n");
+ dev_err(dev, "unable to remap \"gsi\" memory\n");
ret = -ENOMEM;
goto err_disable_irq_wake;
}
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 9f50d0d11704..a7b5a6407e8f 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -36,7 +36,7 @@
#define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
-#define IPA_AGGR_TIME_LIMIT_DEFAULT 1000 /* microseconds */
+#define IPA_AGGR_TIME_LIMIT_DEFAULT 500 /* microseconds */
/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
@@ -530,7 +530,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
/* Note that HDR_ENDIANNESS indicates big endian header fields */
- if (!endpoint->toward_ipa && endpoint->data->qmap)
+ if (endpoint->data->qmap)
val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
iowrite32(val, endpoint->ipa->reg_virt + offset);
@@ -541,7 +541,7 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
u32 val;
- if (endpoint->toward_ipa && endpoint->data->dma_mode) {
+ if (endpoint->data->dma_mode) {
enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
u32 dma_endpoint_id;
@@ -552,7 +552,7 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
} else {
val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
}
- /* Other bitfields unspecified (and 0) */
+ /* All other bits unspecified (and 0) */
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -576,17 +576,20 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
if (endpoint->data->aggregation) {
if (!endpoint->toward_ipa) {
- u32 aggr_size = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
u32 limit;
val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
- val |= u32_encode_bits(aggr_size,
- AGGR_BYTE_LIMIT_FMASK);
+
+ limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
+ val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);
+
limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
- val |= u32_encode_bits(limit / IPA_AGGR_GRANULARITY,
- AGGR_TIME_LIMIT_FMASK);
- val |= u32_encode_bits(0, AGGR_PKT_LIMIT_FMASK);
+ limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
+ val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK);
+
+ /* AGGR_PKT_LIMIT is 0 (unlimited) */
+
if (endpoint->data->rx.aggr_close_eof)
val |= AGGR_SW_EOF_ACTIVE_FMASK;
/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
@@ -671,7 +674,7 @@ ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
u32 offset;
u32 val;
- val = u32_encode_bits(enable ? 1 : 0, HOL_BLOCK_EN_FMASK);
+ val = enable ? HOL_BLOCK_EN_FMASK : 0;
offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -683,7 +686,7 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
struct ipa_endpoint *endpoint = &ipa->endpoint[i];
- if (endpoint->ee_id != GSI_EE_MODEM)
+ if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
continue;
(void)ipa_endpoint_init_hol_block_timer(endpoint, 0);
@@ -1297,16 +1300,16 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
ipa_endpoint_init_aggr(endpoint);
ipa_endpoint_init_deaggr(endpoint);
ipa_endpoint_init_seq(endpoint);
+ ipa_endpoint_init_mode(endpoint);
} else {
if (endpoint->ipa->version == IPA_VERSION_3_5_1)
(void)ipa_endpoint_program_suspend(endpoint, false);
ipa_endpoint_init_hdr_ext(endpoint);
ipa_endpoint_init_aggr(endpoint);
+ ipa_endpoint_init_hdr_metadata_mask(endpoint);
}
ipa_endpoint_init_cfg(endpoint);
ipa_endpoint_init_hdr(endpoint);
- ipa_endpoint_init_hdr_metadata_mask(endpoint);
- ipa_endpoint_init_mode(endpoint);
ipa_endpoint_status(endpoint);
}
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 76d5108b8403..27a869df3a4b 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -674,6 +674,11 @@ static void ipa_validate_build(void)
/* This is used as a divisor */
BUILD_BUG_ON(!IPA_AGGR_GRANULARITY);
+
+ /* Aggregation granularity value can't be 0, and must fit */
+ BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
+ BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) >
+ field_max(AGGR_GRANULARITY));
#endif /* IPA_VALIDATE */
}
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index 0a688d8c1d7c..eb4e39fa7d4b 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -32,10 +32,12 @@ struct ipa;
* parameter is supplied to the offset macro. The "ee" value is a member of
* the gsi_ee enumerated type.
*
- * The offset of a register dependent on endpoint id is computed by a macro
- * that is supplied a parameter "ep". The "ep" value is assumed to be less
- * than the maximum endpoint value for the current hardware, and that will
- * not exceed IPA_ENDPOINT_MAX.
+ * The offset of a register dependent on endpoint ID is computed by a macro
+ * that is supplied a parameter "ep", "txep", or "rxep". A register with an
+ * "ep" parameter is valid for any endpoint; a register with a "txep" or
+ * "rxep" parameter is valid only for TX or RX endpoints, respectively. The
+ * "*ep" value is assumed to be less than the maximum valid endpoint ID
+ * for the current hardware, and that will not exceed IPA_ENDPOINT_MAX.
*
* The offset of registers related to filter and route tables is computed
* by a macro that is supplied a parameter "er". The "er" represents an
@@ -190,24 +192,23 @@ static inline u32 ipa_reg_bcr_val(enum ipa_version version)
return 0x00000000;
}
-
#define IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET 0x000001e8
#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec
/* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */
+/* The internal inactivity timer clock is used for the aggregation timer */
+#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */
+
#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0
#define AGGR_GRANULARITY GENMASK(8, 4)
-/* Compute the value to use in the AGGR_GRANULARITY field representing
- * the given number of microseconds (up to 1 millisecond).
- * x = (32 * usec) / 1000 - 1
+/* Compute the value to use in the AGGR_GRANULARITY field representing the
+ * given number of microseconds. The value is one less than the number of
+ * timer ticks in the requested period. Zero is not a valid granularity value.
*/
-static inline u32 ipa_aggr_granularity_val(u32 microseconds)
+static inline u32 ipa_aggr_granularity_val(u32 usec)
{
- /* assert(microseconds >= 16); (?) */
- /* assert(microseconds <= 1015); */
-
- return DIV_ROUND_CLOSEST(32 * microseconds, 1000) - 1;
+ return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1;
}
#define IPA_REG_TX_CFG_OFFSET 0x000001fc
@@ -293,11 +294,13 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define HDR_TOTAL_LEN_OR_PAD_OFFSET_FMASK GENMASK(9, 4)
#define HDR_PAD_TO_ALIGNMENT_FMASK GENMASK(13, 10)
-#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(ep) \
- (0x00000818 + 0x0070 * (ep))
+/* Valid only for RX (IPA producer) endpoints */
+#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(rxep) \
+ (0x00000818 + 0x0070 * (rxep))
-#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(ep) \
- (0x00000820 + 0x0070 * (ep))
+/* Valid only for TX (IPA consumer) endpoints */
+#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(txep) \
+ (0x00000820 + 0x0070 * (txep))
#define MODE_FMASK GENMASK(2, 0)
#define DEST_PIPE_INDEX_FMASK GENMASK(8, 4)
#define BYTE_THRESHOLD_FMASK GENMASK(27, 12)
@@ -316,19 +319,21 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define AGGR_FORCE_CLOSE_FMASK GENMASK(22, 22)
#define AGGR_HARD_BYTE_LIMIT_ENABLE_FMASK GENMASK(24, 24)
-#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(ep) \
- (0x0000082c + 0x0070 * (ep))
+/* Valid only for RX (IPA producer) endpoints */
+#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(rxep) \
+ (0x0000082c + 0x0070 * (rxep))
#define HOL_BLOCK_EN_FMASK GENMASK(0, 0)
-/* The next register is valid only for RX (IPA producer) endpoints */
-#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(ep) \
- (0x00000830 + 0x0070 * (ep))
+/* Valid only for RX (IPA producer) endpoints */
+#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(rxep) \
+ (0x00000830 + 0x0070 * (rxep))
/* The next fields are present for IPA v4.2 only */
#define BASE_VALUE_FMASK GENMASK(4, 0)
#define SCALE_FMASK GENMASK(12, 8)
-#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(ep) \
- (0x00000834 + 0x0070 * (ep))
+/* Valid only for TX (IPA consumer) endpoints */
+#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(txep) \
+ (0x00000834 + 0x0070 * (txep))
#define DEAGGR_HDR_LEN_FMASK GENMASK(5, 0)
#define PACKET_OFFSET_VALID_FMASK GENMASK(7, 7)
#define PACKET_OFFSET_LOCATION_FMASK GENMASK(13, 8)
@@ -338,8 +343,9 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
(0x00000838 + 0x0070 * (ep))
#define RSRC_GRP_FMASK GENMASK(1, 0)
-#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(ep) \
- (0x0000083c + 0x0070 * (ep))
+/* Valid only for TX (IPA consumer) endpoints */
+#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(txep) \
+ (0x0000083c + 0x0070 * (txep))
#define HPS_SEQ_TYPE_FMASK GENMASK(3, 0)
#define DPS_SEQ_TYPE_FMASK GENMASK(7, 4)
#define HPS_REP_SEQ_TYPE_FMASK GENMASK(11, 8)
@@ -353,7 +359,7 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
/* The next field is present for IPA v4.0 and above */
#define STATUS_PKT_SUPPRESS_FMASK GENMASK(9, 9)
-/* "er" is either an endpoint id (for filters) or a route id (for routes) */
+/* "er" is either an endpoint ID (for filters) or a route ID (for routes) */
#define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \
(0x0000085c + 0x0070 * (er))
#define FILTER_HASH_MSK_SRC_ID_FMASK GENMASK(0, 0)
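
As a quick sanity check of the rewritten helper, assume the driver's default granularity of 500 microseconds (the IPA_AGGR_GRANULARITY definition itself lies outside this capture):

	ipa_aggr_granularity_val(500)
		= DIV_ROUND_CLOSEST(500 * 32000, USEC_PER_SEC) - 1
		= DIV_ROUND_CLOSEST(16000000, 1000000) - 1
		= 16 - 1 = 15

The result is nonzero and fits within the 5-bit AGGR_GRANULARITY field (maximum 31, i.e. periods up to roughly one millisecond), which is exactly what the two BUILD_BUG_ON() checks added to ipa_validate_build() in ipa_main.c enforce.
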
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index a1f8db00d55a..9f9980ec2ed3 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -35,12 +35,6 @@
*/
/* Supports hardware interface version 0x2000 */
-/* Offset relative to the base of the IPA shared address space of the
- * shared region used for communication with the microcontroller. The
- * region is 128 bytes in size, but only the first 40 bytes are used.
- */
-#define IPA_MEM_UC_OFFSET 0x0000
-
/* Delay to allow the microcontroller to save state when crashing */
#define IPA_SEND_DELAY 100 /* microseconds */
@@ -60,6 +54,10 @@
* @hw_state: state of hardware (including error type information)
* @warning_counter: counter of non-fatal hardware errors
* @interface_version: hardware-reported interface version
+ *
+ * A shared memory area at the base of IPA resident memory is used for
+ * communication with the microcontroller. The region is 128 bytes in
+ * size, but only the first 40 bytes (structured this way) are used.
*/
struct ipa_uc_mem_area {
u8 command; /* enum ipa_uc_command */
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 05847eb91a1b..ae477f7756af 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -281,6 +281,9 @@ struct xenvif {
u8 ipv6_csum:1;
u8 multicast_control:1;
+ /* headroom requested by xen-netfront */
+ u16 xdp_headroom;
+
/* Is this interface disabled? True when backend discovers
* frontend is rogue.
*/
@@ -395,6 +398,7 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
irqreturn_t xenvif_interrupt(int irq, void *dev_id);
extern bool separate_tx_rx_irq;
+extern bool provides_xdp_headroom;
extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_stall_timeout_msecs;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 0c8a02a1ead7..8af497285691 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -483,6 +483,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->queues = NULL;
vif->num_queues = 0;
+ vif->xdp_headroom = 0;
+
spin_lock_init(&vif->lock);
INIT_LIST_HEAD(&vif->fe_mcast_addr);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 315dfc6ea297..6dfca7265644 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -96,6 +96,13 @@ unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
+/* The module parameter tells whether the backend has to place data
+ * for xen-netfront at the XDP_PACKET_HEADROOM offset
+ * needed for XDP processing
+ */
+bool provides_xdp_headroom = true;
+module_param(provides_xdp_headroom, bool, 0644);
+
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
u8 status);
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index ef5887037b22..ac034f69a170 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -258,6 +258,19 @@ static void xenvif_rx_next_skb(struct xenvif_queue *queue,
pkt->extra_count++;
}
+ if (queue->vif->xdp_headroom) {
+ struct xen_netif_extra_info *extra;
+
+ extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
+
+ memset(extra, 0, sizeof(struct xen_netif_extra_info));
+ extra->u.xdp.headroom = queue->vif->xdp_headroom;
+ extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
+ extra->flags = 0;
+
+ pkt->extra_count++;
+ }
+
if (skb->sw_hash) {
struct xen_netif_extra_info *extra;
@@ -356,7 +369,7 @@ static void xenvif_rx_data_slot(struct xenvif_queue *queue,
struct xen_netif_rx_request *req,
struct xen_netif_rx_response *rsp)
{
- unsigned int offset = 0;
+ unsigned int offset = queue->vif->xdp_headroom;
unsigned int flags;
do {
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 286054b60d47..7e62a6ee7622 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -393,6 +393,24 @@ static void set_backend_state(struct backend_info *be,
}
}
+static void read_xenbus_frontend_xdp(struct backend_info *be,
+ struct xenbus_device *dev)
+{
+ struct xenvif *vif = be->vif;
+ u16 headroom;
+ int err;
+
+ err = xenbus_scanf(XBT_NIL, dev->otherend,
+ "xdp-headroom", "%hu", &headroom);
+ if (err != 1) {
+ vif->xdp_headroom = 0;
+ return;
+ }
+ if (headroom > XEN_NETIF_MAX_XDP_HEADROOM)
+ headroom = XEN_NETIF_MAX_XDP_HEADROOM;
+ vif->xdp_headroom = headroom;
+}
+
/**
* Callback received when the frontend's state changes.
*/
@@ -417,6 +435,11 @@ static void frontend_changed(struct xenbus_device *dev,
set_backend_state(be, XenbusStateConnected);
break;
+ case XenbusStateReconfiguring:
+ read_xenbus_frontend_xdp(be, dev);
+ xenbus_switch_state(dev, XenbusStateReconfigured);
+ break;
+
case XenbusStateClosing:
set_backend_state(be, XenbusStateClosing);
break;
@@ -947,6 +970,8 @@ static int read_xenbus_vif_flags(struct backend_info *be)
vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
"feature-ipv6-csum-offload", 0);
+ read_xenbus_frontend_xdp(be, dev);
+
return 0;
}
@@ -1036,6 +1061,15 @@ static int netback_probe(struct xenbus_device *dev,
goto abort_transaction;
}
+ /* we can adjust the headroom for netfront XDP processing */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-xdp-headroom", "%d",
+ provides_xdp_headroom);
+ if (err) {
+ message = "writing feature-xdp-headroom";
+ goto abort_transaction;
+ }
+
/* We don't support rx-flip path (except old guests who
* don't grok this feature flag).
*/
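
For the handshake to work end to end, the frontend must read the backend's feature-xdp-headroom node and, when it wants XDP, write the xdp-headroom value that read_xenbus_frontend_xdp() above consumes. A sketch of what the frontend's side of that write plausibly looks like (the corresponding xen-netfront xenbus hunk lies beyond the end of this capture, so the function name here is an assumption):

/* Sketch: frontend half of the negotiation, assuming netfront keeps its
 * xenbus device in np->xbdev as it does today.
 */
static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
{
	int err;

	err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
			    "xdp-headroom", "%hu",
			    xdp ? XDP_PACKET_HEADROOM : 0);
	if (err)
		pr_warn("Error writing xdp-headroom\n");

	return err;
}
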
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 482c6c8b0fb7..468f3f6f1425 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -44,6 +44,9 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>
+#include <linux/bpf.h>
+#include <net/page_pool.h>
+#include <linux/bpf_trace.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
@@ -102,6 +105,8 @@ struct netfront_queue {
char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
struct netfront_info *info;
+ struct bpf_prog __rcu *xdp_prog;
+
struct napi_struct napi;
/* Split event channels support, tx_* == rx_* when using
@@ -144,6 +149,9 @@ struct netfront_queue {
struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
grant_ref_t gref_rx_head;
grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
+
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_rxq;
};
struct netfront_info {
@@ -159,6 +167,10 @@ struct netfront_info {
struct netfront_stats __percpu *rx_stats;
struct netfront_stats __percpu *tx_stats;
+ /* XDP state */
+ bool netback_has_xdp_headroom;
+ bool netfront_xdp_enabled;
+
atomic_t rx_gso_checksum_fixup;
};
@@ -265,8 +277,8 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
if (unlikely(!skb))
return NULL;
- page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
- if (!page) {
+ page = page_pool_dev_alloc_pages(queue->page_pool);
+ if (unlikely(!page)) {
kfree_skb(skb);
return NULL;
}
@@ -560,6 +572,66 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
return queue_idx;
}
+static int xennet_xdp_xmit_one(struct net_device *dev,
+ struct netfront_queue *queue,
+ struct xdp_frame *xdpf)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
+ int notify;
+
+ xennet_make_first_txreq(queue, NULL,
+ virt_to_page(xdpf->data),
+ offset_in_page(xdpf->data),
+ xdpf->len);
+
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
+ if (notify)
+ notify_remote_via_irq(queue->tx_irq);
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->bytes += xdpf->len;
+ tx_stats->packets++;
+ u64_stats_update_end(&tx_stats->syncp);
+
+ xennet_tx_buf_gc(queue);
+
+ return 0;
+}
+
+static int xennet_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ unsigned int num_queues = dev->real_num_tx_queues;
+ struct netfront_info *np = netdev_priv(dev);
+ struct netfront_queue *queue = NULL;
+ unsigned long irq_flags;
+ int drops = 0;
+ int i, err;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ queue = &np->queues[smp_processor_id() % num_queues];
+
+ spin_lock_irqsave(&queue->tx_lock, irq_flags);
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+
+ if (!xdpf)
+ continue;
+ err = xennet_xdp_xmit_one(dev, queue, xdpf);
+ if (err) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
+ spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
+
+ return n - drops;
+}
+
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
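
Note the return convention: under the ndo_xdp_xmit() contract of this kernel generation, the driver frees any frame it cannot queue and reports only how many it accepted (a summary comment, not code from the patch):

/* ndo_xdp_xmit() contract as implemented above (sketch):
 *   - frames that fail xennet_xdp_xmit_one() are released with
 *     xdp_return_frame_rx_napi() inside the loop;
 *   - the return value (n - drops) tells the caller how many frames
 *     were actually queued, and is used for accounting only. */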
@@ -778,23 +850,82 @@ static int xennet_get_extras(struct netfront_queue *queue,
return err;
}
+static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
+ struct xen_netif_rx_response *rx, struct bpf_prog *prog,
+ struct xdp_buff *xdp, bool *need_xdp_flush)
+{
+ struct xdp_frame *xdpf;
+ u32 len = rx->status;
+ u32 act = XDP_PASS;
+ int err;
+
+ xdp->data_hard_start = page_address(pdata);
+ xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
+ xdp_set_data_meta_invalid(xdp);
+ xdp->data_end = xdp->data + len;
+ xdp->rxq = &queue->xdp_rxq;
+ xdp->frame_sz = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_TX:
+ get_page(pdata);
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
+ if (unlikely(err < 0))
+ trace_xdp_exception(queue->info->netdev, prog, act);
+ break;
+ case XDP_REDIRECT:
+ get_page(pdata);
+ err = xdp_do_redirect(queue->info->netdev, xdp, prog);
+ *need_xdp_flush = true;
+ if (unlikely(err))
+ trace_xdp_exception(queue->info->netdev, prog, act);
+ break;
+ case XDP_PASS:
+ case XDP_DROP:
+ break;
+ case XDP_ABORTED:
+ trace_xdp_exception(queue->info->netdev, prog, act);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ }
+
+ return act;
+}
+
static int xennet_get_responses(struct netfront_queue *queue,
struct netfront_rx_info *rinfo, RING_IDX rp,
- struct sk_buff_head *list)
+ struct sk_buff_head *list,
+ bool *need_xdp_flush)
{
struct xen_netif_rx_response *rx = &rinfo->rx;
- struct xen_netif_extra_info *extras = rinfo->extras;
- struct device *dev = &queue->info->netdev->dev;
+ int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
RING_IDX cons = queue->rx.rsp_cons;
struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
+ struct xen_netif_extra_info *extras = rinfo->extras;
grant_ref_t ref = xennet_get_rx_ref(queue, cons);
- int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
+ struct device *dev = &queue->info->netdev->dev;
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
+ unsigned long ret;
int slots = 1;
int err = 0;
- unsigned long ret;
+ u32 verdict;
if (rx->flags & XEN_NETRXF_extra_info) {
err = xennet_get_extras(queue, extras, rp);
+ if (!err) {
+ struct xen_netif_extra_info *xdp_ext =
+ &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
+
+ if (xdp_ext->type)
+ rx->offset = xdp_ext->u.xdp.headroom;
+ }
cons = queue->rx.rsp_cons;
}
@@ -827,9 +958,24 @@ static int xennet_get_responses(struct netfront_queue *queue,
gnttab_release_grant_reference(&queue->gref_rx_head, ref);
- __skb_queue_tail(list, skb);
-
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(queue->xdp_prog);
+ if (xdp_prog) {
+ if (!(rx->flags & XEN_NETRXF_more_data)) {
+ /* currently only a single page contains data */
+ verdict = xennet_run_xdp(queue,
+ skb_frag_page(&skb_shinfo(skb)->frags[0]),
+ rx, xdp_prog, &xdp, need_xdp_flush);
+ if (verdict != XDP_PASS)
+ err = -EINVAL;
+ } else {
+ /* drop the frame */
+ err = -EINVAL;
+ }
+ }
+ rcu_read_unlock();
next:
+ __skb_queue_tail(list, skb);
if (!(rx->flags & XEN_NETRXF_more_data))
break;
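
With xennet_run_xdp() wired into the response path, the usual verdicts behave as they do on hardware drivers. A minimal program to exercise it might look like this (illustrative, built separately with clang -target bpf; not part of the patch):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Pass every packet up the stack; change the return value to
 * XDP_DROP or XDP_TX to exercise the other branches. */
SEC("xdp")
int xdp_pass_all(struct xdp_md *ctx)
{
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";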
@@ -998,6 +1144,7 @@ static int xennet_poll(struct napi_struct *napi, int budget)
struct sk_buff_head errq;
struct sk_buff_head tmpq;
int err;
+ bool need_xdp_flush = false;
spin_lock(&queue->rx_lock);
@@ -1014,7 +1161,8 @@ static int xennet_poll(struct napi_struct *napi, int budget)
memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
memset(extras, 0, sizeof(rinfo.extras));
- err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
+ err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
+ &need_xdp_flush);
if (unlikely(err)) {
err:
@@ -1060,6 +1208,8 @@ err:
i = ++queue->rx.rsp_cons;
work_done++;
}
+ if (need_xdp_flush)
+ xdp_do_flush();
__skb_queue_purge(&errq);
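
Deferring xdp_do_flush() to the end of the poll is the standard XDP batching pattern: each XDP_REDIRECT only queues the frame, and a single flush per NAPI cycle pushes everything out. In outline (a generic sketch; rx_next() is a hypothetical iterator, not driver code):

static int napi_poll_sketch(struct net_device *dev, struct bpf_prog *prog,
			    int budget)
{
	struct xdp_buff xdp;
	bool need_flush = false;
	int work = 0;

	while (work < budget && rx_next(dev, &xdp)) {
		if (bpf_prog_run_xdp(prog, &xdp) == XDP_REDIRECT &&
		    !xdp_do_redirect(dev, &xdp, prog))
			need_flush = true;	/* queued, not yet sent */
		work++;
	}
	if (need_flush)
		xdp_do_flush();	/* one flush per poll, not per packet */
	return work;
}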
@@ -1261,6 +1411,101 @@ static void xennet_poll_controller(struct net_device *dev)
}
#endif
+#define NETBACK_XDP_HEADROOM_DISABLE 0
+#define NETBACK_XDP_HEADROOM_ENABLE 1
+
+static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
+{
+ int err;
+ unsigned short headroom;
+
+ headroom = xdp ? XDP_PACKET_HEADROOM : 0;
+ err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
+ "xdp-headroom", "%hu",
+ headroom);
+ if (err)
+ pr_warn("Error writing xdp-headroom\n");
+
+ return err;
+}
+
+static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
+ struct netfront_info *np = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+ unsigned int i;
+ int err;
+
+ if (dev->mtu > max_mtu) {
+ netdev_warn(dev, "XDP requires MTU of at most %lu\n", max_mtu);
+ return -EINVAL;
+ }
+
+ if (!np->netback_has_xdp_headroom)
+ return 0;
+
+ xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
+
+ err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
+ NETBACK_XDP_HEADROOM_DISABLE);
+ if (err)
+ return err;
+
+ /* avoid the race with XDP headroom adjustment */
+ wait_event(module_wq,
+ xenbus_read_driver_state(np->xbdev->otherend) ==
+ XenbusStateReconfigured);
+ np->netfront_xdp_enabled = true;
+
+ old_prog = rtnl_dereference(np->queues[0].xdp_prog);
+
+ if (prog)
+ bpf_prog_add(prog, dev->real_num_tx_queues);
+
+ for (i = 0; i < dev->real_num_tx_queues; ++i)
+ rcu_assign_pointer(np->queues[i].xdp_prog, prog);
+
+ if (old_prog)
+ for (i = 0; i < dev->real_num_tx_queues; ++i)
+ bpf_prog_put(old_prog);
+
+ xenbus_switch_state(np->xbdev, XenbusStateConnected);
+
+ return 0;
+}
+
+static u32 xennet_xdp_query(struct net_device *dev)
+{
+ unsigned int num_queues = dev->real_num_tx_queues;
+ struct netfront_info *np = netdev_priv(dev);
+ const struct bpf_prog *xdp_prog;
+ struct netfront_queue *queue;
+ unsigned int i;
+
+ for (i = 0; i < num_queues; ++i) {
+ queue = &np->queues[i];
+ xdp_prog = rtnl_dereference(queue->xdp_prog);
+ if (xdp_prog)
+ return xdp_prog->aux->id;
+ }
+
+ return 0;
+}
+
+static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return xennet_xdp_set(dev, xdp->prog, xdp->extack);
+ case XDP_QUERY_PROG:
+ xdp->prog_id = xennet_xdp_query(dev);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops xennet_netdev_ops = {
.ndo_open = xennet_open,
.ndo_stop = xennet_close,
@@ -1272,6 +1517,8 @@ static const struct net_device_ops xennet_netdev_ops = {
.ndo_fix_features = xennet_fix_features,
.ndo_set_features = xennet_set_features,
.ndo_select_queue = xennet_select_queue,
+ .ndo_bpf = xennet_xdp,
+ .ndo_xdp_xmit = xennet_xdp_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = xennet_poll_controller,
#endif
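
With .ndo_bpf in place, programs arrive through the normal netlink attach path and land in xennet_xdp() as XDP_SETUP_PROG. From a guest's userspace this could be driven as follows (a sketch against the libbpf API of the same era; "eth0" and "prog.o" are placeholders):

#include <net/if.h>
#include <bpf/libbpf.h>

int attach_xdp(void)
{
	struct bpf_object *obj = bpf_object__open_file("prog.o", NULL);
	struct bpf_program *prog;
	int ifindex = if_nametoindex("eth0");

	if (libbpf_get_error(obj) || !ifindex)
		return -1;
	if (bpf_object__load(obj))
		return -1;
	prog = bpf_object__find_program_by_title(obj, "xdp");
	if (!prog)
		return -1;
	/* ends up in xennet_xdp_set() via the XDP_SETUP_PROG command */
	return bpf_set_link_xdp_fd(ifindex, bpf_program__fd(prog), 0);
}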
@@ -1331,6 +1578,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
SET_NETDEV_DEV(netdev, &dev->dev);
np->netdev = netdev;
+ np->netfront_xdp_enabled = false;
netif_carrier_off(netdev);
@@ -1419,6 +1667,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
queue->rx_ring_ref = GRANT_INVALID_REF;
queue->tx.sring = NULL;
queue->rx.sring = NULL;
+
+ page_pool_destroy(queue->page_pool);
}
}
@@ -1754,6 +2004,51 @@ static void xennet_destroy_queues(struct netfront_info *info)
info->queues = NULL;
}
+
+static int xennet_create_page_pool(struct netfront_queue *queue)
+{
+ int err;
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = 0,
+ .pool_size = NET_RX_RING_SIZE,
+ .nid = NUMA_NO_NODE,
+ .dev = &queue->info->netdev->dev,
+ .offset = XDP_PACKET_HEADROOM,
+ .max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
+ };
+
+ queue->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(queue->page_pool)) {
+ err = PTR_ERR(queue->page_pool);
+ queue->page_pool = NULL;
+ return err;
+ }
+
+ err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
+ queue->id);
+ if (err) {
+ netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
+ goto err_free_pp;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
+ MEM_TYPE_PAGE_POOL, queue->page_pool);
+ if (err) {
+ netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
+ goto err_unregister_rxq;
+ }
+ return 0;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(&queue->xdp_rxq);
+err_free_pp:
+ page_pool_destroy(queue->page_pool);
+ queue->page_pool = NULL;
+ return err;
+}
+
static int xennet_create_queues(struct netfront_info *info,
unsigned int *num_queues)
{
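
The page pool ties RX allocation, the XDP memory model and teardown together; condensed, its lifecycle in this patch is (a summary comment, not code from the patch):

/* page_pool lifecycle in xen-netfront after this change:
 *
 *   connect:     xennet_create_queues()
 *                  -> xennet_create_page_pool()
 *                       page_pool_create()
 *                       xdp_rxq_info_reg() +
 *                       xdp_rxq_info_reg_mem_model(MEM_TYPE_PAGE_POOL)
 *   RX refill:   xennet_alloc_one_rx_buffer()
 *                  -> page_pool_dev_alloc_pages(queue->page_pool)
 *   disconnect:  xennet_disconnect_backend()
 *                  -> page_pool_destroy(queue->page_pool)
 *
 * With MEM_TYPE_PAGE_POOL registered, pages released through the XDP
 * core (xdp_return_frame() and friends) are recycled into the pool
 * rather than handed back to the page allocator. */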
@@ -1779,6 +2074,14 @@ static int xennet_create_queues(struct netfront_info *info,
break;
}
+ /* use page pool recycling instead of buddy allocator */
+ ret = xennet_create_page_pool(queue);
+ if (ret < 0) {
+ dev_err(&info->xbdev->dev, "can't allocate page pool\n");
+ *num_queues = i;
+ return ret;
+ }
+
netif_napi_add(queue->info->netdev, &queue->napi,
xennet_poll, 64);
if (netif_running(info->netdev))
@@ -1825,6 +2128,17 @@ static int talk_to_netback(struct xenbus_device *dev,
goto out_unlocked;
}
+ info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-xdp-headroom", 0);
+ if (info->netback_has_xdp_headroom) {
+ /* set the current xen-netfront xdp state */
+ err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
+ NETBACK_XDP_HEADROOM_ENABLE :
+ NETBACK_XDP_HEADROOM_DISABLE);
+ if (err)
+ goto out_unlocked;
+ }
+
rtnl_lock();
if (info->queues)
xennet_destroy_queues(info);
@@ -1959,6 +2273,8 @@ static int xennet_connect(struct net_device *dev)
err = talk_to_netback(np->xbdev, np);
if (err)
return err;
+ if (np->netback_has_xdp_headroom)
+ pr_info("backend supports XDP headroom\n");
/* talk_to_netback() sets the correct number of queues */
num_queues = dev->real_num_tx_queues;
diff --git a/include/net/bonding.h b/include/net/bonding.h
index a00e1764e9b1..7d132cc1e584 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -86,6 +86,11 @@
#define bond_for_each_slave_rcu(bond, pos, iter) \
netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
+#ifdef CONFIG_XFRM_OFFLOAD
+#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
+ NETIF_F_GSO_ESP)
+#endif /* CONFIG_XFRM_OFFLOAD */
+
#ifdef CONFIG_NET_POLL_CONTROLLER
extern atomic_t netpoll_block_tx;
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index 4f20dbc42910..2194322c3c7f 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -161,6 +161,19 @@
*/
/*
+ * "xdp-headroom" is used to request that extra space is added
+ * for XDP processing. The value is measured in bytes and passed by
+ * the frontend to be consistent between both ends.
+ * If the value is greater than zero that means that
+ * an RX response is going to be passed to an XDP program for processing.
+ * XEN_NETIF_MAX_XDP_HEADROOM defines the maximum headroom offset in bytes
+ *
+ * "feature-xdp-headroom" is set to "1" by the netback side like other features
+ * so a guest can check if an XDP program can be processed.
+ */
+#define XEN_NETIF_MAX_XDP_HEADROOM 0x7FFF
+
+/*
* Control ring
* ============
*
@@ -846,7 +859,8 @@ struct xen_netif_tx_request {
#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_HASH (4) /* u.hash */
-#define XEN_NETIF_EXTRA_TYPE_MAX (5)
+#define XEN_NETIF_EXTRA_TYPE_XDP (5) /* u.xdp */
+#define XEN_NETIF_EXTRA_TYPE_MAX (6)
/* xen_netif_extra_info_t flags. */
#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
@@ -879,6 +893,10 @@ struct xen_netif_extra_info {
uint8_t algorithm;
uint8_t value[4];
} hash;
+ struct {
+ uint16_t headroom;
+ uint16_t pad[2];
+ } xdp;
uint16_t pad[3];
} u;
};
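
The new extras slot is how a backend reports the headroom actually applied to a response; xennet_get_responses() above folds it into rx->offset. On the backend side, filling the slot could look like this (an illustrative sketch, not the literal netback code):

struct xen_netif_extra_info extra = {
	.type = XEN_NETIF_EXTRA_TYPE_XDP,
	.flags = 0,
};

/* headroom as negotiated through the "xdp-headroom" xenbus node */
extra.u.xdp.headroom = vif->xdp_headroom;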
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 29bd405adbbd..7b436ebde61d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4293,7 +4293,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
struct packet_ring_buffer *rb;
struct sk_buff_head *rb_queue;
__be16 num;
- int err = -EINVAL;
+ int err;
/* Added to avoid minimal code churn */
struct tpacket_req *req = &req_u->req;