author:    David S. Miller <davem@davemloft.net>  2020-07-21 03:01:45 +0300
committer: David S. Miller <davem@davemloft.net>  2020-07-21 03:01:45 +0300
commit:    87c831ce8e51cad98300fe98f59e26e404720217
tree:      0b5f0169a4addfde4b7e9868a3fac25c45ec5c4d
parent:    a8b7b2d0b3fc965d823e362840e5451e2eb4a71b
parent:    9d45c8e89079dfe5489efaae9cea12a539ebc3ff
Merge branch 'net-macb-Wake-on-Lan-magic-packet-GEM-and-MACB-handling'
Nicolas Ferre says:
====================
net: macb: Wake-on-Lan magic packet GEM and MACB handling
Here is the second part of the support for WoL magic packet in the current macb
driver. It addresses the bulk of the feature and is based on current
net-next/master.
MACB and GEM code must co-exist and, as they don't share exactly the same
register layout, I had to specialize the suspend/resume paths a bit and plug in a
specific IRQ handler to avoid overloading the "normal" IRQ hot path.
These changes were tested on both the sam9x60, which embeds a MACB+FIFO
controller, and the sama5d2, which has a GEM+packet-buffer type of controller.
Best regards,
Nicolas
Changes in v7:
- Release the spinlock before exiting macb_suspend()/macb_resume() in case of an
error while changing the IRQ handler
Changes in v6:
- Rebase on net-next/master now that the "fixes" patches of the series are
merged in both net and net-next.
- Add the GEM handling and update the MACB one to finish the support of WoL
magic packet on the two revisions of the controller.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
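
The diffstat and diff below wire the driver's existing macb_get_wol() and
macb_set_wol() callbacks into gem_ethtool_ops; the bodies of those callbacks
predate this series and are therefore not part of the diff. For orientation
only, here is a minimal sketch of what such a magic-packet-only callback pair
can look like; everything in it except the identifiers visible in the diff
(struct macb, MACB_WOL_ENABLED) is an assumption, not code from this series:

	/* Sketch only: the real macb_get_wol()/macb_set_wol() already live in
	 * macb_main.c; MACB_WOL_HAS_MAGIC_PACKET is an assumed capability flag.
	 */
	static void sketch_get_wol(struct net_device *netdev,
				   struct ethtool_wolinfo *wol)
	{
		struct macb *bp = netdev_priv(netdev);

		wol->supported = 0;
		wol->wolopts = 0;

		/* Advertise magic-packet wake only when the hardware supports it */
		if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
			wol->supported = WAKE_MAGIC;
			if (bp->wol & MACB_WOL_ENABLED)
				wol->wolopts |= WAKE_MAGIC;
		}
	}

	static int sketch_set_wol(struct net_device *netdev,
				  struct ethtool_wolinfo *wol)
	{
		struct macb *bp = netdev_priv(netdev);

		/* Only magic-packet wake is handled; reject any other option */
		if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
		    (wol->wolopts & ~WAKE_MAGIC))
			return -EOPNOTSUPP;

		if (wol->wolopts & WAKE_MAGIC)
			bp->wol |= MACB_WOL_ENABLED;
		else
			bp->wol &= ~MACB_WOL_ENABLED;

		/* Tell the PM core whether this device may wake the system */
		device_set_wakeup_enable(&bp->pdev->dev,
					 bp->wol & MACB_WOL_ENABLED);

		return 0;
	}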
-rw-r--r--  drivers/net/ethernet/cadence/macb.h      |   3
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 191
2 files changed, 167 insertions(+), 27 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index ab827fb4b6b9..4f1b41569260 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -90,6 +90,7 @@
 #define GEM_SA3T		0x009C /* Specific3 Top */
 #define GEM_SA4B		0x00A0 /* Specific4 Bottom */
 #define GEM_SA4T		0x00A4 /* Specific4 Top */
+#define GEM_WOL			0x00b8 /* Wake on LAN */
 #define GEM_EFTSH		0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
 #define GEM_EFRSH		0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
 #define GEM_PEFTSH		0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
@@ -396,6 +397,8 @@
 #define MACB_PDRSFT_SIZE	1
 #define MACB_SRI_OFFSET		26 /* TSU Seconds Register Increment */
 #define MACB_SRI_SIZE		1
+#define GEM_WOL_OFFSET		28 /* Enable wake-on-lan interrupt */
+#define GEM_WOL_SIZE		1
 
 /* Timer increment fields */
 #define MACB_TI_CNS_OFFSET	0
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index e5e37aa81b02..a6a35e1b0115 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1517,6 +1517,64 @@ static void macb_tx_restart(struct macb_queue *queue)
 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 }
 
+static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
+{
+	struct macb_queue *queue = dev_id;
+	struct macb *bp = queue->bp;
+	u32 status;
+
+	status = queue_readl(queue, ISR);
+
+	if (unlikely(!status))
+		return IRQ_NONE;
+
+	spin_lock(&bp->lock);
+
+	if (status & MACB_BIT(WOL)) {
+		queue_writel(queue, IDR, MACB_BIT(WOL));
+		macb_writel(bp, WOL, 0);
+		netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
+			    (unsigned int)(queue - bp->queues),
+			    (unsigned long)status);
+		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+			queue_writel(queue, ISR, MACB_BIT(WOL));
+		pm_wakeup_event(&bp->pdev->dev, 0);
+	}
+
+	spin_unlock(&bp->lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
+{
+	struct macb_queue *queue = dev_id;
+	struct macb *bp = queue->bp;
+	u32 status;
+
+	status = queue_readl(queue, ISR);
+
+	if (unlikely(!status))
+		return IRQ_NONE;
+
+	spin_lock(&bp->lock);
+
+	if (status & GEM_BIT(WOL)) {
+		queue_writel(queue, IDR, GEM_BIT(WOL));
+		gem_writel(bp, WOL, 0);
+		netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
+			    (unsigned int)(queue - bp->queues),
+			    (unsigned long)status);
+		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+			queue_writel(queue, ISR, GEM_BIT(WOL));
+		pm_wakeup_event(&bp->pdev->dev, 0);
+	}
+
+	spin_unlock(&bp->lock);
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t macb_interrupt(int irq, void *dev_id)
 {
 	struct macb_queue *queue = dev_id;
@@ -3316,6 +3374,8 @@ static const struct ethtool_ops macb_ethtool_ops = {
 static const struct ethtool_ops gem_ethtool_ops = {
 	.get_regs_len		= macb_get_regs_len,
 	.get_regs		= macb_get_regs,
+	.get_wol		= macb_get_wol,
+	.set_wol		= macb_set_wol,
 	.get_link		= ethtool_op_get_link,
 	.get_ts_info		= macb_get_ts_info,
 	.get_ethtool_stats	= gem_get_ethtool_stats,
@@ -4567,33 +4627,77 @@ static int __maybe_unused macb_suspend(struct device *dev)
 	struct macb_queue *queue = bp->queues;
 	unsigned long flags;
 	unsigned int q;
+	int err;
 
 	if (!netif_running(netdev))
 		return 0;
 
 	if (bp->wol & MACB_WOL_ENABLED) {
-		macb_writel(bp, IER, MACB_BIT(WOL));
-		macb_writel(bp, WOL, MACB_BIT(MAG));
-		enable_irq_wake(bp->queues[0].irq);
-		netif_device_detach(netdev);
-	} else {
-		netif_device_detach(netdev);
+		spin_lock_irqsave(&bp->lock, flags);
+		/* Flush all status bits */
+		macb_writel(bp, TSR, -1);
+		macb_writel(bp, RSR, -1);
 		for (q = 0, queue = bp->queues; q < bp->num_queues;
-		     ++q, ++queue)
-			napi_disable(&queue->napi);
+		     ++q, ++queue) {
+			/* Disable all interrupts */
+			queue_writel(queue, IDR, -1);
+			queue_readl(queue, ISR);
+			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+				queue_writel(queue, ISR, -1);
+		}
+		/* Change interrupt handler and
+		 * Enable WoL IRQ on queue 0
+		 */
+		devm_free_irq(dev, bp->queues[0].irq, bp->queues);
+		if (macb_is_gem(bp)) {
+			err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
+					       IRQF_SHARED, netdev->name, bp->queues);
+			if (err) {
+				dev_err(dev,
+					"Unable to request IRQ %d (error %d)\n",
+					bp->queues[0].irq, err);
+				spin_unlock_irqrestore(&bp->lock, flags);
+				return err;
+			}
+			queue_writel(bp->queues, IER, GEM_BIT(WOL));
+			gem_writel(bp, WOL, MACB_BIT(MAG));
+		} else {
+			err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
+					       IRQF_SHARED, netdev->name, bp->queues);
+			if (err) {
+				dev_err(dev,
+					"Unable to request IRQ %d (error %d)\n",
+					bp->queues[0].irq, err);
+				spin_unlock_irqrestore(&bp->lock, flags);
+				return err;
+			}
+			queue_writel(bp->queues, IER, MACB_BIT(WOL));
+			macb_writel(bp, WOL, MACB_BIT(MAG));
+		}
+		spin_unlock_irqrestore(&bp->lock, flags);
+
+		enable_irq_wake(bp->queues[0].irq);
+	}
+
+	netif_device_detach(netdev);
+	for (q = 0, queue = bp->queues; q < bp->num_queues;
+	     ++q, ++queue)
+		napi_disable(&queue->napi);
+
+	if (!(bp->wol & MACB_WOL_ENABLED)) {
 		rtnl_lock();
 		phylink_stop(bp->phylink);
 		rtnl_unlock();
 		spin_lock_irqsave(&bp->lock, flags);
 		macb_reset_hw(bp);
 		spin_unlock_irqrestore(&bp->lock, flags);
+	}
 
-		if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
-			bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
+	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
+		bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
 
-		if (netdev->hw_features & NETIF_F_NTUPLE)
-			bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
-	}
+	if (netdev->hw_features & NETIF_F_NTUPLE)
+		bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
 
 	if (bp->ptp_info)
 		bp->ptp_info->ptp_remove(netdev);
@@ -4608,7 +4712,9 @@ static int __maybe_unused macb_resume(struct device *dev)
 	struct net_device *netdev = dev_get_drvdata(dev);
 	struct macb *bp = netdev_priv(netdev);
 	struct macb_queue *queue = bp->queues;
+	unsigned long flags;
 	unsigned int q;
+	int err;
 
 	if (!netif_running(netdev))
 		return 0;
@@ -4617,29 +4723,60 @@ static int __maybe_unused macb_resume(struct device *dev)
 	pm_runtime_force_resume(dev);
 
 	if (bp->wol & MACB_WOL_ENABLED) {
-		macb_writel(bp, IDR, MACB_BIT(WOL));
-		macb_writel(bp, WOL, 0);
-		disable_irq_wake(bp->queues[0].irq);
-	} else {
-		macb_writel(bp, NCR, MACB_BIT(MPE));
-
-		if (netdev->hw_features & NETIF_F_NTUPLE)
-			gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
+		spin_lock_irqsave(&bp->lock, flags);
+		/* Disable WoL */
+		if (macb_is_gem(bp)) {
+			queue_writel(bp->queues, IDR, GEM_BIT(WOL));
+			gem_writel(bp, WOL, 0);
+		} else {
+			queue_writel(bp->queues, IDR, MACB_BIT(WOL));
+			macb_writel(bp, WOL, 0);
+		}
+		/* Clear ISR on queue 0 */
+		queue_readl(bp->queues, ISR);
+		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+			queue_writel(bp->queues, ISR, -1);
+		/* Replace interrupt handler on queue 0 */
+		devm_free_irq(dev, bp->queues[0].irq, bp->queues);
+		err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
+				       IRQF_SHARED, netdev->name, bp->queues);
+		if (err) {
+			dev_err(dev,
+				"Unable to request IRQ %d (error %d)\n",
+				bp->queues[0].irq, err);
+			spin_unlock_irqrestore(&bp->lock, flags);
+			return err;
+		}
+		spin_unlock_irqrestore(&bp->lock, flags);
 
-		if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
-			macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
+		disable_irq_wake(bp->queues[0].irq);
 
-		for (q = 0, queue = bp->queues; q < bp->num_queues;
-		     ++q, ++queue)
-			napi_enable(&queue->napi);
+		/* Now make sure we disable phy before moving
+		 * to common restore path
+		 */
 		rtnl_lock();
-		phylink_start(bp->phylink);
+		phylink_stop(bp->phylink);
 		rtnl_unlock();
 	}
 
+	for (q = 0, queue = bp->queues; q < bp->num_queues;
+	     ++q, ++queue)
+		napi_enable(&queue->napi);
+
+	if (netdev->hw_features & NETIF_F_NTUPLE)
+		gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
+
+	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
+		macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
+
+	macb_writel(bp, NCR, MACB_BIT(MPE));
 	macb_init_hw(bp);
 	macb_set_rx_mode(netdev);
 	macb_restore_features(bp);
+	rtnl_lock();
+	phylink_start(bp->phylink);
+	rtnl_unlock();
+
 	netif_device_attach(netdev);
 
 	if (bp->ptp_info)
 		bp->ptp_info->ptp_init(netdev);
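
With the series applied, magic-packet wake-up is driven from userspace through
the standard ethtool interface: "ethtool -s eth0 wol g" arms it, and
"ethtool eth0" should then report "Wake-on: g" (the interface name eth0 is
assumed here). On suspend, macb_suspend() swaps the queue-0 handler for
gem_wol_interrupt() or macb_wol_interrupt() as shown above, so a received magic
packet raises the WOL interrupt and signals the PM core via pm_wakeup_event()
without adding any cost to the normal IRQ hot path.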