| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2012-05-24 12:13:01 +0400 |
|---|---|---|
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2012-05-24 12:13:01 +0400 |
| commit | e644dae645e167d154c0526358940986682a72b0 (patch) | |
| tree | 972993c6568085b8d407fc7e13de10f4b93c651d /drivers/net/ethernet/realtek | |
| parent | 899c612d74d4a242158a4db20367388d6299c028 (diff) | |
| parent | 86809173ce32ef03bd4d0389dfc72df0c805e9c4 (diff) | |
| download | linux-e644dae645e167d154c0526358940986682a72b0.tar.xz | |
Merge branch 'next' into for-linus
Diffstat (limited to 'drivers/net/ethernet/realtek')
| -rw-r--r-- | drivers/net/ethernet/realtek/8139too.c | 109 |
| -rw-r--r-- | drivers/net/ethernet/realtek/Kconfig | 10 |
| -rw-r--r-- | drivers/net/ethernet/realtek/atp.c | 3 |
| -rw-r--r-- | drivers/net/ethernet/realtek/r8169.c | 1611 |
4 files changed, 952 insertions, 781 deletions
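Both Realtek drivers in this diff move their packet/byte counters from dev->stats to per-driver 64-bit counters protected by u64_stats_sync and reported through .ndo_get_stats64. For reference, below is a minimal sketch of that writer/reader pattern under the same u64_stats_* helpers the patch uses; the example_* names are illustrative only and are not part of the patch.

/*
 * Minimal sketch (not from this patch) of the u64_stats_sync pattern:
 * the hot path bumps the counters between update_begin/update_end,
 * and the .ndo_get_stats64 reader retries until it sees a consistent
 * snapshot.
 */
#include <linux/u64_stats_sync.h>

struct example_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Writer side, e.g. from an rx/tx completion path. */
static void example_update(struct example_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader side, e.g. from .ndo_get_stats64. */
static void example_fetch(struct example_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
}

On 64-bit builds the sync object compiles away and the counters are read directly; on 32-bit builds the embedded seqcount lets readers retry instead of taking a lock on the hot path, which is why the drivers below can drop their stats locking.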
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index a8779bedb3d9..df7fd8d083dc 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -565,6 +565,12 @@ struct rtl_extra_stats { unsigned long rx_lost_in_ring; }; +struct rtl8139_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + struct rtl8139_private { void __iomem *mmio_addr; int drv_flags; @@ -575,11 +581,13 @@ struct rtl8139_private { unsigned char *rx_ring; unsigned int cur_rx; /* RX buf index of next pkt */ + struct rtl8139_stats rx_stats; dma_addr_t rx_ring_dma; unsigned int tx_flag; unsigned long cur_tx; unsigned long dirty_tx; + struct rtl8139_stats tx_stats; unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */ unsigned char *tx_bufs; /* Tx bounce buffer region. */ dma_addr_t tx_bufs_dma; @@ -641,7 +649,9 @@ static int rtl8139_poll(struct napi_struct *napi, int budget); static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance); static int rtl8139_close (struct net_device *dev); static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); -static struct net_device_stats *rtl8139_get_stats (struct net_device *dev); +static struct rtnl_link_stats64 *rtl8139_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 + *stats); static void rtl8139_set_rx_mode (struct net_device *dev); static void __set_rx_mode (struct net_device *dev); static void rtl8139_hw_start (struct net_device *dev); @@ -754,10 +764,9 @@ static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev) /* dev and priv zeroed in alloc_etherdev */ dev = alloc_etherdev (sizeof (*tp)); - if (dev == NULL) { - dev_err(&pdev->dev, "Unable to alloc new net device\n"); + if (dev == NULL) return ERR_PTR(-ENOMEM); - } + SET_NETDEV_DEV(dev, &pdev->dev); tp = netdev_priv(dev); @@ -908,10 +917,37 @@ err_out: return ERR_PTR(rc); } +static int rtl8139_set_features(struct net_device *dev, netdev_features_t features) +{ + struct rtl8139_private *tp = netdev_priv(dev); + unsigned long flags; + netdev_features_t changed = features ^ dev->features; + void __iomem *ioaddr = tp->mmio_addr; + + if (!(changed & (NETIF_F_RXALL))) + return 0; + + spin_lock_irqsave(&tp->lock, flags); + + if (changed & NETIF_F_RXALL) { + int rx_mode = tp->rx_config; + if (features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + else + rx_mode &= ~(AcceptErr | AcceptRunt); + tp->rx_config = rtl8139_rx_config | rx_mode; + RTL_W32_F(RxConfig, tp->rx_config); + } + + spin_unlock_irqrestore(&tp->lock, flags); + + return 0; +} + static const struct net_device_ops rtl8139_netdev_ops = { .ndo_open = rtl8139_open, .ndo_stop = rtl8139_close, - .ndo_get_stats = rtl8139_get_stats, + .ndo_get_stats64 = rtl8139_get_stats64, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = rtl8139_set_mac_address, @@ -922,6 +958,7 @@ static const struct net_device_ops rtl8139_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = rtl8139_poll_controller, #endif + .ndo_set_features = rtl8139_set_features, }; static int __devinit rtl8139_init_one (struct pci_dev *pdev, @@ -995,6 +1032,9 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA; dev->vlan_features = dev->features; + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + dev->irq = pdev->irq; /* tp zeroed and aligned in alloc_etherdev */ @@ -1777,8 +1817,10 
@@ static void rtl8139_tx_interrupt (struct net_device *dev, dev->stats.tx_fifo_errors++; } dev->stats.collisions += (txstatus >> 24) & 15; - dev->stats.tx_bytes += txstatus & 0x7ff; - dev->stats.tx_packets++; + u64_stats_update_begin(&tp->tx_stats.syncp); + tp->tx_stats.packets++; + tp->tx_stats.bytes += txstatus & 0x7ff; + u64_stats_update_end(&tp->tx_stats.syncp); } dirty_tx++; @@ -1941,7 +1983,10 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp, /* read size+status of next frame from DMA ring buffer */ rx_status = le32_to_cpu (*(__le32 *) (rx_ring + ring_offset)); rx_size = rx_status >> 16; - pkt_size = rx_size - 4; + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size = rx_size - 4; + else + pkt_size = rx_size; netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n", __func__, rx_status, rx_size, cur_rx); @@ -1979,11 +2024,30 @@ no_early_rx: if (unlikely((rx_size > (MAX_ETH_FRAME_SIZE+4)) || (rx_size < 8) || (!(rx_status & RxStatusOK)))) { + if ((dev->features & NETIF_F_RXALL) && + (rx_size <= (MAX_ETH_FRAME_SIZE + 4)) && + (rx_size >= 8) && + (!(rx_status & RxStatusOK))) { + /* Length is at least mostly OK, but pkt has + * error. I'm hoping we can handle some of these + * errors without resetting the chip. --Ben + */ + dev->stats.rx_errors++; + if (rx_status & RxCRCErr) { + dev->stats.rx_crc_errors++; + goto keep_pkt; + } + if (rx_status & RxRunt) { + dev->stats.rx_length_errors++; + goto keep_pkt; + } + } rtl8139_rx_err (rx_status, dev, tp, ioaddr); received = -1; goto out; } +keep_pkt: /* Malloc up new buffer, compatible with net-2e. */ /* Omit the four octet CRC from the length. */ @@ -1998,8 +2062,10 @@ no_early_rx: skb->protocol = eth_type_trans (skb, dev); - dev->stats.rx_bytes += pkt_size; - dev->stats.rx_packets++; + u64_stats_update_begin(&tp->rx_stats.syncp); + tp->rx_stats.packets++; + tp->rx_stats.bytes += pkt_size; + u64_stats_update_end(&tp->rx_stats.syncp); netif_receive_skb (skb); } else { @@ -2463,11 +2529,13 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } -static struct net_device_stats *rtl8139_get_stats (struct net_device *dev) +static struct rtnl_link_stats64 * +rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rtl8139_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; unsigned long flags; + unsigned int start; if (netif_running(dev)) { spin_lock_irqsave (&tp->lock, flags); @@ -2476,7 +2544,21 @@ static struct net_device_stats *rtl8139_get_stats (struct net_device *dev) spin_unlock_irqrestore (&tp->lock, flags); } - return &dev->stats; + netdev_stats_to_stats64(stats, &dev->stats); + + do { + start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp); + stats->rx_packets = tp->rx_stats.packets; + stats->rx_bytes = tp->rx_stats.bytes; + } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start)); + + do { + start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp); + stats->tx_packets = tp->tx_stats.packets; + stats->tx_bytes = tp->tx_stats.bytes; + } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start)); + + return stats; } /* Set or clear the multicast filter for this adaptor. @@ -2516,6 +2598,9 @@ static void __set_rx_mode (struct net_device *dev) } } + if (dev->features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + /* We can safely update without stopping the chip. 
*/ tmp = rtl8139_rx_config | rx_mode; if (tp->rx_config != tmp) { diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig index 0578859a3c73..5821966f9f28 100644 --- a/drivers/net/ethernet/realtek/Kconfig +++ b/drivers/net/ethernet/realtek/Kconfig @@ -24,11 +24,11 @@ config ATP select CRC32 ---help--- This is a network (Ethernet) device which attaches to your parallel - port. Read <file:drivers/net/atp.c> as well as the Ethernet-HOWTO, - available from <http://www.tldp.org/docs.html#howto>, if you - want to use this. If you intend to use this driver, you should have - said N to the "Parallel printer support", because the two drivers - don't like each other. + port. Read <file:drivers/net/ethernet/realtek/atp.c> as well as the + Ethernet-HOWTO, available from <http://www.tldp.org/docs.html#howto>, + if you want to use this. If you intend to use this driver, you + should have said N to the "Parallel printer support", because the two + drivers don't like each other. To compile this driver as a module, choose M here: the module will be called atp. diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index e3f57fdbf0ea..e02f04d7f3ad 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -140,7 +140,6 @@ static int xcvr[NUM_UNITS]; /* The data transfer mode. */ #include <linux/delay.h> #include <linux/bitops.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/dma.h> @@ -783,7 +782,7 @@ static void net_rx(struct net_device *dev) int pkt_len = (rx_head.rx_count & 0x7ff) - 4; struct sk_buff *skb; - skb = dev_alloc_skb(pkt_len + 2); + skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 7a0c800b50ad..f54509377efa 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -29,7 +29,6 @@ #include <linux/pci-aspm.h> #include <linux/prefetch.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/irq.h> @@ -255,10 +254,6 @@ enum cfg_version { RTL_CFG_2 }; -static void rtl_hw_start_8169(struct net_device *); -static void rtl_hw_start_8168(struct net_device *); -static void rtl_hw_start_8101(struct net_device *); - static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, @@ -667,12 +662,25 @@ struct rtl8169_counters { __le16 tx_underun; }; +enum rtl_flag { + RTL_FLAG_TASK_ENABLED, + RTL_FLAG_TASK_SLOW_PENDING, + RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_TASK_PHY_PENDING, + RTL_FLAG_MAX +}; + +struct rtl8169_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + struct rtl8169_private { void __iomem *mmio_addr; /* memory map physical address */ struct pci_dev *pci_dev; struct net_device *dev; struct napi_struct napi; - spinlock_t lock; u32 msg_enable; u16 txd_version; u16 mac_version; @@ -680,6 +688,8 @@ struct rtl8169_private { u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. 
*/ u32 dirty_rx; u32 dirty_tx; + struct rtl8169_stats rx_stats; + struct rtl8169_stats tx_stats; struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ dma_addr_t TxPhyAddr; @@ -688,9 +698,8 @@ struct rtl8169_private { struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ struct timer_list timer; u16 cp_cmd; - u16 intr_event; - u16 napi_event; - u16 intr_mask; + + u16 event_slow; struct mdio_ops { void (*write)(void __iomem *, int, int); @@ -714,7 +723,13 @@ struct rtl8169_private { unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); unsigned int (*link_ok)(void __iomem *); int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); - struct delayed_work task; + + struct { + DECLARE_BITMAP(flags, RTL_FLAG_MAX); + struct mutex mutex; + struct work_struct work; + } wk; + unsigned features; struct mii_if_info mii; @@ -754,22 +769,15 @@ MODULE_FIRMWARE(FIRMWARE_8105E_1); MODULE_FIRMWARE(FIRMWARE_8168F_1); MODULE_FIRMWARE(FIRMWARE_8168F_2); -static int rtl8169_open(struct net_device *dev); -static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance); -static int rtl8169_init_ring(struct net_device *dev); -static void rtl_hw_start(struct net_device *dev); -static int rtl8169_close(struct net_device *dev); -static void rtl_set_rx_mode(struct net_device *dev); -static void rtl8169_tx_timeout(struct net_device *dev); -static struct net_device_stats *rtl8169_get_stats(struct net_device *dev); -static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *, - void __iomem *, u32 budget); -static int rtl8169_change_mtu(struct net_device *dev, int new_mtu); -static void rtl8169_down(struct net_device *dev); -static void rtl8169_rx_clear(struct rtl8169_private *tp); -static int rtl8169_poll(struct napi_struct *napi, int budget); +static void rtl_lock_work(struct rtl8169_private *tp) +{ + mutex_lock(&tp->wk.mutex); +} + +static void rtl_unlock_work(struct rtl8169_private *tp) +{ + mutex_unlock(&tp->wk.mutex); +} static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) { @@ -1180,12 +1188,51 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) return value; } +static u16 rtl_get_events(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R16(IntrStatus); +} + +static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W16(IntrStatus, bits); + mmiowb(); +} + +static void rtl_irq_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W16(IntrMask, 0); + mmiowb(); +} + +static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W16(IntrMask, bits); +} + +#define RTL_EVENT_NAPI_RX (RxOK | RxErr) +#define RTL_EVENT_NAPI_TX (TxOK | TxErr) +#define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX) + +static void rtl_irq_enable_all(struct rtl8169_private *tp) +{ + rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow); +} + static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; - RTL_W16(IntrMask, 0x0000); - RTL_W16(IntrStatus, tp->intr_event); + rtl_irq_disable(tp); + rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow); RTL_R8(ChipCmd); } @@ -1276,9 +1323,6 @@ static void __rtl8169_check_link_status(struct net_device *dev, struct 
rtl8169_private *tp, void __iomem *ioaddr, bool pm) { - unsigned long flags; - - spin_lock_irqsave(&tp->lock, flags); if (tp->link_ok(ioaddr)) { rtl_link_chg_patch(tp); /* This is to cancel a scheduled suspend if there's one. */ @@ -1293,7 +1337,6 @@ static void __rtl8169_check_link_status(struct net_device *dev, if (pm) pm_schedule_suspend(&tp->pci_dev->dev, 5000); } - spin_unlock_irqrestore(&tp->lock, flags); } static void rtl8169_check_link_status(struct net_device *dev, @@ -1336,12 +1379,12 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); - spin_lock_irq(&tp->lock); + rtl_lock_work(tp); wol->supported = WAKE_ANY; wol->wolopts = __rtl8169_get_wol(tp); - spin_unlock_irq(&tp->lock); + rtl_unlock_work(tp); } static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) @@ -1378,14 +1421,15 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); - spin_lock_irq(&tp->lock); + rtl_lock_work(tp); if (wol->wolopts) tp->features |= RTL_FEATURE_WOL; else tp->features &= ~RTL_FEATURE_WOL; __rtl8169_set_wol(tp, wol->wolopts); - spin_unlock_irq(&tp->lock); + + rtl_unlock_work(tp); device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); @@ -1540,15 +1584,14 @@ out: static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct rtl8169_private *tp = netdev_priv(dev); - unsigned long flags; int ret; del_timer_sync(&tp->timer); - spin_lock_irqsave(&tp->lock, flags); + rtl_lock_work(tp); ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd), cmd->duplex, cmd->advertising); - spin_unlock_irqrestore(&tp->lock, flags); + rtl_unlock_work(tp); return ret; } @@ -1568,33 +1611,51 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev, return features; } -static int rtl8169_set_features(struct net_device *dev, - netdev_features_t features) +static void __rtl8169_set_features(struct net_device *dev, + netdev_features_t features) { struct rtl8169_private *tp = netdev_priv(dev); + netdev_features_t changed = features ^ dev->features; void __iomem *ioaddr = tp->mmio_addr; - unsigned long flags; - spin_lock_irqsave(&tp->lock, flags); + if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX))) + return; - if (features & NETIF_F_RXCSUM) - tp->cp_cmd |= RxChkSum; - else - tp->cp_cmd &= ~RxChkSum; + if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) { + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; - if (dev->features & NETIF_F_HW_VLAN_RX) - tp->cp_cmd |= RxVlan; - else - tp->cp_cmd &= ~RxVlan; + if (dev->features & NETIF_F_HW_VLAN_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; - RTL_W16(CPlusCmd, tp->cp_cmd); - RTL_R16(CPlusCmd); + RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_R16(CPlusCmd); + } + if (changed & NETIF_F_RXALL) { + int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt)); + if (features & NETIF_F_RXALL) + tmp |= (AcceptErr | AcceptRunt); + RTL_W32(RxConfig, tmp); + } +} - spin_unlock_irqrestore(&tp->lock, flags); +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_lock_work(tp); + __rtl8169_set_features(dev, features); + rtl_unlock_work(tp); return 0; } + static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, struct sk_buff *skb) { @@ -1643,14 +1704,12 @@ static int rtl8169_gset_xmii(struct net_device *dev, 
struct ethtool_cmd *cmd) static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct rtl8169_private *tp = netdev_priv(dev); - unsigned long flags; int rc; - spin_lock_irqsave(&tp->lock, flags); - + rtl_lock_work(tp); rc = tp->get_settings(dev, cmd); + rtl_unlock_work(tp); - spin_unlock_irqrestore(&tp->lock, flags); return rc; } @@ -1658,14 +1717,13 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { struct rtl8169_private *tp = netdev_priv(dev); - unsigned long flags; if (regs->len > R8169_REGS_SIZE) regs->len = R8169_REGS_SIZE; - spin_lock_irqsave(&tp->lock, flags); + rtl_lock_work(tp); memcpy_fromio(p, tp->mmio_addr, regs->len); - spin_unlock_irqrestore(&tp->lock, flags); + rtl_unlock_work(tp); } static u32 rtl8169_get_msglevel(struct net_device *dev) @@ -3182,18 +3240,14 @@ static void rtl_hw_phy_config(struct net_device *dev) } } -static void rtl8169_phy_timer(unsigned long __opaque) +static void rtl_phy_work(struct rtl8169_private *tp) { - struct net_device *dev = (struct net_device *)__opaque; - struct rtl8169_private *tp = netdev_priv(dev); struct timer_list *timer = &tp->timer; void __iomem *ioaddr = tp->mmio_addr; unsigned long timeout = RTL8169_PHY_TIMEOUT; assert(tp->mac_version > RTL_GIGA_MAC_VER_01); - spin_lock_irq(&tp->lock); - if (tp->phy_reset_pending(tp)) { /* * A busy loop could burn quite a few cycles on nowadays CPU. @@ -3204,34 +3258,29 @@ static void rtl8169_phy_timer(unsigned long __opaque) } if (tp->link_ok(ioaddr)) - goto out_unlock; + return; - netif_warn(tp, link, dev, "PHY reset until link up\n"); + netif_warn(tp, link, tp->dev, "PHY reset until link up\n"); tp->phy_reset_enable(tp); out_mod_timer: mod_timer(timer, jiffies + timeout); -out_unlock: - spin_unlock_irq(&tp->lock); } -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. 
- */ -static void rtl8169_netpoll(struct net_device *dev) +static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) { + if (!test_and_set_bit(flag, tp->wk.flags)) + schedule_work(&tp->wk.work); +} + +static void rtl8169_phy_timer(unsigned long __opaque) +{ + struct net_device *dev = (struct net_device *)__opaque; struct rtl8169_private *tp = netdev_priv(dev); - struct pci_dev *pdev = tp->pci_dev; - disable_irq(pdev->irq); - rtl8169_interrupt(pdev->irq, dev); - enable_irq(pdev->irq); + rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING); } -#endif static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev, void __iomem *ioaddr) @@ -3310,7 +3359,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24); high = addr[4] | (addr[5] << 8); - spin_lock_irq(&tp->lock); + rtl_lock_work(tp); RTL_W8(Cfg9346, Cfg9346_Unlock); @@ -3334,7 +3383,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) RTL_W8(Cfg9346, Cfg9346_Lock); - spin_unlock_irq(&tp->lock); + rtl_unlock_work(tp); } static int rtl_set_mac_address(struct net_device *dev, void *p) @@ -3384,69 +3433,6 @@ static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data return -EOPNOTSUPP; } -static const struct rtl_cfg_info { - void (*hw_start)(struct net_device *); - unsigned int region; - unsigned int align; - u16 intr_event; - u16 napi_event; - unsigned features; - u8 default_ver; -} rtl_cfg_infos [] = { - [RTL_CFG_0] = { - .hw_start = rtl_hw_start_8169, - .region = 1, - .align = 0, - .intr_event = SYSErr | LinkChg | RxOverflow | - RxFIFOOver | TxErr | TxOK | RxOK | RxErr, - .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, - .features = RTL_FEATURE_GMII, - .default_ver = RTL_GIGA_MAC_VER_01, - }, - [RTL_CFG_1] = { - .hw_start = rtl_hw_start_8168, - .region = 2, - .align = 8, - .intr_event = SYSErr | LinkChg | RxOverflow | - TxErr | TxOK | RxOK | RxErr, - .napi_event = TxErr | TxOK | RxOK | RxOverflow, - .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, - .default_ver = RTL_GIGA_MAC_VER_11, - }, - [RTL_CFG_2] = { - .hw_start = rtl_hw_start_8101, - .region = 2, - .align = 8, - .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout | - RxFIFOOver | TxErr | TxOK | RxOK | RxErr, - .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, - .features = RTL_FEATURE_MSI, - .default_ver = RTL_GIGA_MAC_VER_13, - } -}; - -/* Cfg9346_Unlock assumed. */ -static unsigned rtl_try_msi(struct rtl8169_private *tp, - const struct rtl_cfg_info *cfg) -{ - void __iomem *ioaddr = tp->mmio_addr; - unsigned msi = 0; - u8 cfg2; - - cfg2 = RTL_R8(Config2) & ~MSIEnable; - if (cfg->features & RTL_FEATURE_MSI) { - if (pci_enable_msi(tp->pci_dev)) { - netif_info(tp, hw, tp->dev, "no MSI. 
Back to INTx.\n"); - } else { - cfg2 |= MSIEnable; - msi = RTL_FEATURE_MSI; - } - } - if (tp->mac_version <= RTL_GIGA_MAC_VER_06) - RTL_W8(Config2, cfg2); - return msi; -} - static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp) { if (tp->features & RTL_FEATURE_MSI) { @@ -3455,25 +3441,6 @@ static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp) } } -static const struct net_device_ops rtl8169_netdev_ops = { - .ndo_open = rtl8169_open, - .ndo_stop = rtl8169_close, - .ndo_get_stats = rtl8169_get_stats, - .ndo_start_xmit = rtl8169_start_xmit, - .ndo_tx_timeout = rtl8169_tx_timeout, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = rtl8169_change_mtu, - .ndo_fix_features = rtl8169_fix_features, - .ndo_set_features = rtl8169_set_features, - .ndo_set_mac_address = rtl_set_mac_address, - .ndo_do_ioctl = rtl8169_ioctl, - .ndo_set_rx_mode = rtl_set_rx_mode, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = rtl8169_netpoll, -#endif - -}; - static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp) { struct mdio_ops *ops = &tp->mdio_ops; @@ -3781,12 +3748,20 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Cfg9346, Cfg9346_Unlock); rtl_generic_op(tp, tp->jumbo_ops.enable); + RTL_W8(Cfg9346, Cfg9346_Lock); } static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Cfg9346, Cfg9346_Unlock); rtl_generic_op(tp, tp->jumbo_ops.disable); + RTL_W8(Cfg9346, Cfg9346_Lock); } static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) @@ -3824,23 +3799,21 @@ static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; RTL_W8(MaxTxPacketSize, 0x3f); RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); RTL_W8(Config4, RTL_R8(Config4) | 0x01); - pci_write_config_byte(pdev, 0x79, 0x20); + rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT); } static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; RTL_W8(MaxTxPacketSize, 0x0c); RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); RTL_W8(Config4, RTL_R8(Config4) & ~0x01); - pci_write_config_byte(pdev, 0x79, 0x50); + rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); } static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) @@ -3939,280 +3912,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp) } } -static int __devinit -rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; - const unsigned int region = cfg->region; - struct rtl8169_private *tp; - struct mii_if_info *mii; - struct net_device *dev; - void __iomem *ioaddr; - int chipset, i; - int rc; - - if (netif_msg_drv(&debug)) { - printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", - MODULENAME, RTL8169_VERSION); - } - - dev = alloc_etherdev(sizeof (*tp)); - if (!dev) { - if (netif_msg_drv(&debug)) - dev_err(&pdev->dev, "unable to alloc new ethernet\n"); - rc = -ENOMEM; - goto out; - } - - SET_NETDEV_DEV(dev, &pdev->dev); - dev->netdev_ops = &rtl8169_netdev_ops; - tp = netdev_priv(dev); - tp->dev = dev; - tp->pci_dev = pdev; - tp->msg_enable = netif_msg_init(debug.msg_enable, 
R8169_MSG_DEFAULT); - - mii = &tp->mii; - mii->dev = dev; - mii->mdio_read = rtl_mdio_read; - mii->mdio_write = rtl_mdio_write; - mii->phy_id_mask = 0x1f; - mii->reg_num_mask = 0x1f; - mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); - - /* disable ASPM completely as that cause random device stop working - * problems as well as full system hangs for some PCIe devices users */ - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | - PCIE_LINK_STATE_CLKPM); - - /* enable device (incl. PCI PM wakeup and hotplug setup) */ - rc = pci_enable_device(pdev); - if (rc < 0) { - netif_err(tp, probe, dev, "enable failure\n"); - goto err_out_free_dev_1; - } - - if (pci_set_mwi(pdev) < 0) - netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n"); - - /* make sure PCI base addr 1 is MMIO */ - if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { - netif_err(tp, probe, dev, - "region #%d not an MMIO resource, aborting\n", - region); - rc = -ENODEV; - goto err_out_mwi_2; - } - - /* check for weird/broken PCI region reporting */ - if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { - netif_err(tp, probe, dev, - "Invalid PCI region size(s), aborting\n"); - rc = -ENODEV; - goto err_out_mwi_2; - } - - rc = pci_request_regions(pdev, MODULENAME); - if (rc < 0) { - netif_err(tp, probe, dev, "could not request regions\n"); - goto err_out_mwi_2; - } - - tp->cp_cmd = RxChkSum; - - if ((sizeof(dma_addr_t) > 4) && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { - tp->cp_cmd |= PCIDAC; - dev->features |= NETIF_F_HIGHDMA; - } else { - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc < 0) { - netif_err(tp, probe, dev, "DMA configuration failed\n"); - goto err_out_free_res_3; - } - } - - /* ioremap MMIO region */ - ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); - if (!ioaddr) { - netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); - rc = -EIO; - goto err_out_free_res_3; - } - tp->mmio_addr = ioaddr; - - if (!pci_is_pcie(pdev)) - netif_info(tp, probe, dev, "not PCI Express\n"); - - /* Identify chip attached to board */ - rtl8169_get_mac_version(tp, dev, cfg->default_ver); - - rtl_init_rxcfg(tp); - - RTL_W16(IntrMask, 0x0000); - - rtl_hw_reset(tp); - - RTL_W16(IntrStatus, 0xffff); - - pci_set_master(pdev); - - /* - * Pretend we are using VLANs; This bypasses a nasty bug where - * Interrupts stop flowing on high load on 8110SCd controllers. 
- */ - if (tp->mac_version == RTL_GIGA_MAC_VER_05) - tp->cp_cmd |= RxVlan; - - rtl_init_mdio_ops(tp); - rtl_init_pll_power_ops(tp); - rtl_init_jumbo_ops(tp); - - rtl8169_print_mac_version(tp); - - chipset = tp->mac_version; - tp->txd_version = rtl_chip_infos[chipset].txd_version; - - RTL_W8(Cfg9346, Cfg9346_Unlock); - RTL_W8(Config1, RTL_R8(Config1) | PMEnable); - RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); - if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) - tp->features |= RTL_FEATURE_WOL; - if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) - tp->features |= RTL_FEATURE_WOL; - tp->features |= rtl_try_msi(tp, cfg); - RTL_W8(Cfg9346, Cfg9346_Lock); - - if (rtl_tbi_enabled(tp)) { - tp->set_speed = rtl8169_set_speed_tbi; - tp->get_settings = rtl8169_gset_tbi; - tp->phy_reset_enable = rtl8169_tbi_reset_enable; - tp->phy_reset_pending = rtl8169_tbi_reset_pending; - tp->link_ok = rtl8169_tbi_link_ok; - tp->do_ioctl = rtl_tbi_ioctl; - } else { - tp->set_speed = rtl8169_set_speed_xmii; - tp->get_settings = rtl8169_gset_xmii; - tp->phy_reset_enable = rtl8169_xmii_reset_enable; - tp->phy_reset_pending = rtl8169_xmii_reset_pending; - tp->link_ok = rtl8169_xmii_link_ok; - tp->do_ioctl = rtl_xmii_ioctl; - } - - spin_lock_init(&tp->lock); - - /* Get MAC address */ - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = RTL_R8(MAC0 + i); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - - SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops); - dev->watchdog_timeo = RTL8169_TX_TIMEOUT; - dev->irq = pdev->irq; - dev->base_addr = (unsigned long) ioaddr; - - netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); - - /* don't enable SG, IP_CSUM and TSO by default - it might not work - * properly for all devices */ - dev->features |= NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - - dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | - NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | - NETIF_F_HIGHDMA; - - if (tp->mac_version == RTL_GIGA_MAC_VER_05) - /* 8110SCd requires hardware Rx VLAN - disallow toggling */ - dev->hw_features &= ~NETIF_F_HW_VLAN_RX; - - tp->intr_mask = 0xffff; - tp->hw_start = cfg->hw_start; - tp->intr_event = cfg->intr_event; - tp->napi_event = cfg->napi_event; - - tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? - ~(RxBOVF | RxFOVF) : ~0; - - init_timer(&tp->timer); - tp->timer.data = (unsigned long) dev; - tp->timer.function = rtl8169_phy_timer; - - tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; - - rc = register_netdev(dev); - if (rc < 0) - goto err_out_msi_4; - - pci_set_drvdata(pdev, dev); - - netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n", - rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr, - (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq); - if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) { - netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, " - "tx checksumming: %s]\n", - rtl_chip_infos[chipset].jumbo_max, - rtl_chip_infos[chipset].jumbo_tx_csum ? 
"ok" : "ko"); - } - - if (tp->mac_version == RTL_GIGA_MAC_VER_27 || - tp->mac_version == RTL_GIGA_MAC_VER_28 || - tp->mac_version == RTL_GIGA_MAC_VER_31) { - rtl8168_driver_start(tp); - } - - device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); - - if (pci_dev_run_wake(pdev)) - pm_runtime_put_noidle(&pdev->dev); - - netif_carrier_off(dev); - -out: - return rc; - -err_out_msi_4: - rtl_disable_msi(pdev, tp); - iounmap(ioaddr); -err_out_free_res_3: - pci_release_regions(pdev); -err_out_mwi_2: - pci_clear_mwi(pdev); - pci_disable_device(pdev); -err_out_free_dev_1: - free_netdev(dev); - goto out; -} - -static void __devexit rtl8169_remove_one(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - - if (tp->mac_version == RTL_GIGA_MAC_VER_27 || - tp->mac_version == RTL_GIGA_MAC_VER_28 || - tp->mac_version == RTL_GIGA_MAC_VER_31) { - rtl8168_driver_stop(tp); - } - - cancel_delayed_work_sync(&tp->task); - - unregister_netdev(dev); - - rtl_release_firmware(tp); - - if (pci_dev_run_wake(pdev)) - pm_runtime_get_noresume(&pdev->dev); - - /* restore original MAC address */ - rtl_rar_set(tp, dev->perm_addr); - - rtl_disable_msi(pdev, tp); - rtl8169_release_board(pdev, dev, tp->mmio_addr); - pci_set_drvdata(pdev, NULL); -} - static void rtl_request_uncached_firmware(struct rtl8169_private *tp) { struct rtl_fw *rtl_fw; @@ -4257,78 +3956,6 @@ static void rtl_request_firmware(struct rtl8169_private *tp) rtl_request_uncached_firmware(tp); } -static int rtl8169_open(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; - struct pci_dev *pdev = tp->pci_dev; - int retval = -ENOMEM; - - pm_runtime_get_sync(&pdev->dev); - - /* - * Rx and Tx desscriptors needs 256 bytes alignment. - * dma_alloc_coherent provides more. - */ - tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, - &tp->TxPhyAddr, GFP_KERNEL); - if (!tp->TxDescArray) - goto err_pm_runtime_put; - - tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, - &tp->RxPhyAddr, GFP_KERNEL); - if (!tp->RxDescArray) - goto err_free_tx_0; - - retval = rtl8169_init_ring(dev); - if (retval < 0) - goto err_free_rx_1; - - INIT_DELAYED_WORK(&tp->task, NULL); - - smp_mb(); - - rtl_request_firmware(tp); - - retval = request_irq(dev->irq, rtl8169_interrupt, - (tp->features & RTL_FEATURE_MSI) ? 
0 : IRQF_SHARED, - dev->name, dev); - if (retval < 0) - goto err_release_fw_2; - - napi_enable(&tp->napi); - - rtl8169_init_phy(dev, tp); - - rtl8169_set_features(dev, dev->features); - - rtl_pll_power_up(tp); - - rtl_hw_start(dev); - - tp->saved_wolopts = 0; - pm_runtime_put_noidle(&pdev->dev); - - rtl8169_check_link_status(dev, tp, ioaddr); -out: - return retval; - -err_release_fw_2: - rtl_release_firmware(tp); - rtl8169_rx_clear(tp); -err_free_rx_1: - dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, - tp->RxPhyAddr); - tp->RxDescArray = NULL; -err_free_tx_0: - dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, - tp->TxPhyAddr); - tp->TxDescArray = NULL; -err_pm_runtime_put: - pm_runtime_put_noidle(&pdev->dev); - goto out; -} - static void rtl_rx_close(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; @@ -4379,7 +4006,7 @@ static void rtl_hw_start(struct net_device *dev) tp->hw_start(dev); - netif_start_queue(dev); + rtl_irq_enable_all(tp); } static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp, @@ -4436,6 +4063,56 @@ static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) } } +static void rtl_set_rx_mode(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 mc_filter[2]; /* Multicast hash filter */ + int rx_mode; + u32 tmp = 0; + + if (dev->flags & IFF_PROMISC) { + /* Unconditionally log net taps. */ + netif_notice(tp, link, dev, "Promiscuous mode enabled\n"); + rx_mode = + AcceptBroadcast | AcceptMulticast | AcceptMyPhys | + AcceptAllPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter perfectly -- accept all multicasts. */ + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else { + struct netdev_hw_addr *ha; + + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + rx_mode |= AcceptMulticast; + } + } + + if (dev->features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + + tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode; + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + u32 data = mc_filter[0]; + + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(data); + } + + RTL_W32(MAR0 + 4, mc_filter[1]); + RTL_W32(MAR0 + 0, mc_filter[0]); + + RTL_W32(RxConfig, tmp); +} + static void rtl_hw_start_8169(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -4506,9 +4183,6 @@ static void rtl_hw_start_8169(struct net_device *dev) /* no early-rx interrupts */ RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); - - /* Enable all known interrupts by setting the interrupt mask. */ - RTL_W16(IntrMask, tp->intr_event); } static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits) @@ -4888,8 +4562,8 @@ static void rtl_hw_start_8168(struct net_device *dev) /* Work around for RxFIFO overflow. 
*/ if (tp->mac_version == RTL_GIGA_MAC_VER_11) { - tp->intr_event |= RxFIFOOver | PCSTimeout; - tp->intr_event &= ~RxOverflow; + tp->event_slow |= RxFIFOOver | PCSTimeout; + tp->event_slow &= ~RxOverflow; } rtl_set_rx_tx_desc_registers(tp, ioaddr); @@ -4977,8 +4651,6 @@ static void rtl_hw_start_8168(struct net_device *dev) RTL_W8(Cfg9346, Cfg9346_Lock); RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); - - RTL_W16(IntrMask, tp->intr_event); } #define R810X_CPCMD_QUIRK_MASK (\ @@ -5077,10 +4749,8 @@ static void rtl_hw_start_8101(struct net_device *dev) void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; - if (tp->mac_version >= RTL_GIGA_MAC_VER_30) { - tp->intr_event &= ~RxFIFOOver; - tp->napi_event &= ~RxFIFOOver; - } + if (tp->mac_version >= RTL_GIGA_MAC_VER_30) + tp->event_slow &= ~RxFIFOOver; if (tp->mac_version == RTL_GIGA_MAC_VER_13 || tp->mac_version == RTL_GIGA_MAC_VER_16) { @@ -5136,8 +4806,6 @@ static void rtl_hw_start_8101(struct net_device *dev) rtl_set_rx_mode(dev); RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); - - RTL_W16(IntrMask, tp->intr_event); } static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) @@ -5328,94 +4996,37 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp) { rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); tp->cur_tx = tp->dirty_tx = 0; + netdev_reset_queue(tp->dev); } -static void rtl8169_schedule_work(struct net_device *dev, work_func_t task) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - PREPARE_DELAYED_WORK(&tp->task, task); - schedule_delayed_work(&tp->task, 4); -} - -static void rtl8169_wait_for_quiescence(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; - - synchronize_irq(dev->irq); - - /* Wait for any pending NAPI task to complete */ - napi_disable(&tp->napi); - - rtl8169_irq_mask_and_ack(tp); - - tp->intr_mask = 0xffff; - RTL_W16(IntrMask, tp->intr_event); - napi_enable(&tp->napi); -} - -static void rtl8169_reinit_task(struct work_struct *work) +static void rtl_reset_work(struct rtl8169_private *tp) { - struct rtl8169_private *tp = - container_of(work, struct rtl8169_private, task.work); - struct net_device *dev = tp->dev; - int ret; - - rtnl_lock(); - - if (!netif_running(dev)) - goto out_unlock; - - rtl8169_wait_for_quiescence(dev); - rtl8169_close(dev); - - ret = rtl8169_open(dev); - if (unlikely(ret < 0)) { - if (net_ratelimit()) - netif_err(tp, drv, dev, - "reinit failure (status = %d). 
Rescheduling\n", - ret); - rtl8169_schedule_work(dev, rtl8169_reinit_task); - } - -out_unlock: - rtnl_unlock(); -} - -static void rtl8169_reset_task(struct work_struct *work) -{ - struct rtl8169_private *tp = - container_of(work, struct rtl8169_private, task.work); struct net_device *dev = tp->dev; int i; - rtnl_lock(); - - if (!netif_running(dev)) - goto out_unlock; + napi_disable(&tp->napi); + netif_stop_queue(dev); + synchronize_sched(); rtl8169_hw_reset(tp); - rtl8169_wait_for_quiescence(dev); - for (i = 0; i < NUM_RX_DESC; i++) rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz); rtl8169_tx_clear(tp); rtl8169_init_ring_indexes(tp); + napi_enable(&tp->napi); rtl_hw_start(dev); netif_wake_queue(dev); rtl8169_check_link_status(dev, tp, tp->mmio_addr); - -out_unlock: - rtnl_unlock(); } static void rtl8169_tx_timeout(struct net_device *dev) { - rtl8169_schedule_work(dev, rtl8169_reset_task); + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); } static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, @@ -5540,6 +5151,10 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, txd->opts2 = cpu_to_le32(opts[1]); + netdev_sent_queue(dev, skb->len); + + skb_tx_timestamp(skb); + wmb(); /* Anti gcc 2.95.3 bugware (sic) */ @@ -5552,9 +5167,22 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, RTL_W8(TxPoll, NPQ); + mmiowb(); + if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); netif_stop_queue(dev); - smp_rmb(); + /* Sync with rtl_tx: + * - publish queue status and cur_tx ring index (write barrier) + * - refresh dirty_tx ring index (read barrier). + * May the current thread have a pessimistic view of the ring + * status and forget to wake up queue, a racing rtl_tx thread + * can't. 
+ */ + smp_mb(); if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS) netif_wake_queue(dev); } @@ -5618,14 +5246,19 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev) rtl8169_hw_reset(tp); - rtl8169_schedule_work(dev, rtl8169_reinit_task); + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); } -static void rtl8169_tx_interrupt(struct net_device *dev, - struct rtl8169_private *tp, - void __iomem *ioaddr) +struct rtl_txc { + int packets; + int bytes; +}; + +static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) { + struct rtl8169_stats *tx_stats = &tp->tx_stats; unsigned int dirty_tx, tx_left; + struct rtl_txc txc = { 0, 0 }; dirty_tx = tp->dirty_tx; smp_rmb(); @@ -5644,18 +5277,34 @@ static void rtl8169_tx_interrupt(struct net_device *dev, rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, tp->TxDescArray + entry); if (status & LastFrag) { - dev->stats.tx_packets++; - dev->stats.tx_bytes += tx_skb->skb->len; - dev_kfree_skb(tx_skb->skb); + struct sk_buff *skb = tx_skb->skb; + + txc.packets++; + txc.bytes += skb->len; + dev_kfree_skb(skb); tx_skb->skb = NULL; } dirty_tx++; tx_left--; } + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->packets += txc.packets; + tx_stats->bytes += txc.bytes; + u64_stats_update_end(&tx_stats->syncp); + + netdev_completed_queue(dev, txc.packets, txc.bytes); + if (tp->dirty_tx != dirty_tx) { tp->dirty_tx = dirty_tx; - smp_wmb(); + /* Sync with rtl8169_start_xmit: + * - publish dirty_tx ring index (write barrier) + * - refresh cur_tx ring index and queue status (read barrier) + * May the current thread miss the stopped queue condition, + * a racing xmit thread can only have a right view of the + * ring status. + */ + smp_mb(); if (netif_queue_stopped(dev) && (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { netif_wake_queue(dev); @@ -5666,9 +5315,11 @@ static void rtl8169_tx_interrupt(struct net_device *dev, * of start_xmit activity is detected (if it is not detected, * it is slow enough). 
-- FR */ - smp_rmb(); - if (tp->cur_tx != dirty_tx) + if (tp->cur_tx != dirty_tx) { + void __iomem *ioaddr = tp->mmio_addr; + RTL_W8(TxPoll, NPQ); + } } } @@ -5707,9 +5358,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data, return skb; } -static int rtl8169_rx_interrupt(struct net_device *dev, - struct rtl8169_private *tp, - void __iomem *ioaddr, u32 budget) +static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget) { unsigned int cur_rx, rx_left; unsigned int count; @@ -5737,14 +5386,26 @@ static int rtl8169_rx_interrupt(struct net_device *dev, if (status & RxCRC) dev->stats.rx_crc_errors++; if (status & RxFOVF) { - rtl8169_schedule_work(dev, rtl8169_reset_task); + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); dev->stats.rx_fifo_errors++; } + if ((status & (RxRUNT | RxCRC)) && + !(status & (RxRWT | RxFOVF)) && + (dev->features & NETIF_F_RXALL)) + goto process_pkt; + rtl8169_mark_to_asic(desc, rx_buf_sz); } else { struct sk_buff *skb; - dma_addr_t addr = le64_to_cpu(desc->addr); - int pkt_size = (status & 0x00003fff) - 4; + dma_addr_t addr; + int pkt_size; + +process_pkt: + addr = le64_to_cpu(desc->addr); + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size = (status & 0x00003fff) - 4; + else + pkt_size = status & 0x00003fff; /* * The driver does not support incoming fragmented @@ -5774,8 +5435,10 @@ static int rtl8169_rx_interrupt(struct net_device *dev, napi_gro_receive(&tp->napi, skb); - dev->stats.rx_bytes += pkt_size; - dev->stats.rx_packets++; + u64_stats_update_begin(&tp->rx_stats.syncp); + tp->rx_stats.packets++; + tp->rx_stats.bytes += pkt_size; + u64_stats_update_end(&tp->rx_stats.syncp); } /* Work around for AMD plateform. */ @@ -5798,101 +5461,120 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) { struct net_device *dev = dev_instance; struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; int handled = 0; - int status; + u16 status; - /* loop handling interrupts until we have no new ones or - * we hit a invalid/hotplug case. - */ - status = RTL_R16(IntrStatus); - while (status && status != 0xffff) { - status &= tp->intr_event; - if (!status) - break; + status = rtl_get_events(tp); + if (status && status != 0xffff) { + status &= RTL_EVENT_NAPI | tp->event_slow; + if (status) { + handled = 1; - handled = 1; + rtl_irq_disable(tp); + napi_schedule(&tp->napi); + } + } + return IRQ_RETVAL(handled); +} - /* Handle all of the error cases first. These will reset - * the chip, so just exit the loop. - */ - if (unlikely(!netif_running(dev))) { - rtl8169_hw_reset(tp); +/* + * Workqueue context. + */ +static void rtl_slow_event_work(struct rtl8169_private *tp) +{ + struct net_device *dev = tp->dev; + u16 status; + + status = rtl_get_events(tp) & tp->event_slow; + rtl_ack_events(tp, status); + + if (unlikely(status & RxFIFOOver)) { + switch (tp->mac_version) { + /* Work around for rx fifo overflow */ + case RTL_GIGA_MAC_VER_11: + netif_stop_queue(dev); + /* XXX - Hack alert. See rtl_task(). 
*/ + set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); + default: break; } + } - if (unlikely(status & RxFIFOOver)) { - switch (tp->mac_version) { - /* Work around for rx fifo overflow */ - case RTL_GIGA_MAC_VER_11: - netif_stop_queue(dev); - rtl8169_tx_timeout(dev); - goto done; - default: - break; - } - } + if (unlikely(status & SYSErr)) + rtl8169_pcierr_interrupt(dev); - if (unlikely(status & SYSErr)) { - rtl8169_pcierr_interrupt(dev); - break; - } + if (status & LinkChg) + __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); + + napi_disable(&tp->napi); + rtl_irq_disable(tp); - if (status & LinkChg) - __rtl8169_check_link_status(dev, tp, ioaddr, true); + napi_enable(&tp->napi); + napi_schedule(&tp->napi); +} - /* We need to see the lastest version of tp->intr_mask to - * avoid ignoring an MSI interrupt and having to wait for - * another event which may never come. - */ - smp_rmb(); - if (status & tp->intr_mask & tp->napi_event) { - RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); - tp->intr_mask = ~tp->napi_event; +static void rtl_task(struct work_struct *work) +{ + static const struct { + int bitnr; + void (*action)(struct rtl8169_private *); + } rtl_work[] = { + /* XXX - keep rtl_slow_event_work() as first element. */ + { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work }, + { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work }, + { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work } + }; + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, wk.work); + struct net_device *dev = tp->dev; + int i; - if (likely(napi_schedule_prep(&tp->napi))) - __napi_schedule(&tp->napi); - else - netif_info(tp, intr, dev, - "interrupt %04x in poll\n", status); - } + rtl_lock_work(tp); - /* We only get a new MSI interrupt when all active irq - * sources on the chip have been acknowledged. So, ack - * everything we've seen and check if new sources have become - * active to avoid blocking all interrupts from the chip. - */ - RTL_W16(IntrStatus, - (status & RxFIFOOver) ? (status | RxOverflow) : status); - status = RTL_R16(IntrStatus); + if (!netif_running(dev) || + !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) + goto out_unlock; + + for (i = 0; i < ARRAY_SIZE(rtl_work); i++) { + bool pending; + + pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags); + if (pending) + rtl_work[i].action(tp); } -done: - return IRQ_RETVAL(handled); + +out_unlock: + rtl_unlock_work(tp); } static int rtl8169_poll(struct napi_struct *napi, int budget) { struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); struct net_device *dev = tp->dev; - void __iomem *ioaddr = tp->mmio_addr; - int work_done; + u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow; + int work_done= 0; + u16 status; + + status = rtl_get_events(tp); + rtl_ack_events(tp, status & ~tp->event_slow); + + if (status & RTL_EVENT_NAPI_RX) + work_done = rtl_rx(dev, tp, (u32) budget); + + if (status & RTL_EVENT_NAPI_TX) + rtl_tx(dev, tp); - work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget); - rtl8169_tx_interrupt(dev, tp, ioaddr); + if (status & tp->event_slow) { + enable_mask &= ~tp->event_slow; + + rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING); + } if (work_done < budget) { napi_complete(napi); - /* We need for force the visibility of tp->intr_mask - * for other CPUs, as we can loose an MSI interrupt - * and potentially wait for a retransmit timeout if we don't. - * The posted write to IntrMask is safe, as it will - * eventually make it to the chip and we won't loose anything - * until it does. 
- */ - tp->intr_mask = 0xffff; - wmb(); - RTL_W16(IntrMask, tp->intr_event); + rtl_irq_enable(tp, enable_mask); + mmiowb(); } return work_done; @@ -5916,26 +5598,19 @@ static void rtl8169_down(struct net_device *dev) del_timer_sync(&tp->timer); - netif_stop_queue(dev); - napi_disable(&tp->napi); - - spin_lock_irq(&tp->lock); + netif_stop_queue(dev); rtl8169_hw_reset(tp); /* * At this point device interrupts can not be enabled in any function, - * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task, - * rtl8169_reinit_task) and napi is disabled (rtl8169_poll). + * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task) + * and napi is disabled (rtl8169_poll). */ rtl8169_rx_missed(dev, ioaddr); - spin_unlock_irq(&tp->lock); - - synchronize_irq(dev->irq); - /* Give a racing hard_start_xmit a few cycles to complete. */ - synchronize_sched(); /* FIXME: should this be synchronize_irq()? */ + synchronize_sched(); rtl8169_tx_clear(tp); @@ -5954,9 +5629,13 @@ static int rtl8169_close(struct net_device *dev) /* Update counters before going down */ rtl8169_update_counters(dev); + rtl_lock_work(tp); + clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl8169_down(dev); + rtl_unlock_work(tp); - free_irq(dev->irq, dev); + free_irq(pdev->irq, dev); dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, tp->RxPhyAddr); @@ -5970,77 +5649,127 @@ static int rtl8169_close(struct net_device *dev) return 0; } -static void rtl_set_rx_mode(struct net_device *dev) +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8169_netpoll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_interrupt(tp->pci_dev->irq, dev); +} +#endif + +static int rtl_open(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; - unsigned long flags; - u32 mc_filter[2]; /* Multicast hash filter */ - int rx_mode; - u32 tmp = 0; + struct pci_dev *pdev = tp->pci_dev; + int retval = -ENOMEM; - if (dev->flags & IFF_PROMISC) { - /* Unconditionally log net taps. */ - netif_notice(tp, link, dev, "Promiscuous mode enabled\n"); - rx_mode = - AcceptBroadcast | AcceptMulticast | AcceptMyPhys | - AcceptAllPhys; - mc_filter[1] = mc_filter[0] = 0xffffffff; - } else if ((netdev_mc_count(dev) > multicast_filter_limit) || - (dev->flags & IFF_ALLMULTI)) { - /* Too many to filter perfectly -- accept all multicasts. */ - rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; - mc_filter[1] = mc_filter[0] = 0xffffffff; - } else { - struct netdev_hw_addr *ha; + pm_runtime_get_sync(&pdev->dev); - rx_mode = AcceptBroadcast | AcceptMyPhys; - mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(ha, dev) { - int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; - mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); - rx_mode |= AcceptMulticast; - } - } + /* + * Rx and Tx desscriptors needs 256 bytes alignment. + * dma_alloc_coherent provides more. 
+ */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto err_pm_runtime_put; - spin_lock_irqsave(&tp->lock, flags); + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_tx_0; - tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode; + retval = rtl8169_init_ring(dev); + if (retval < 0) + goto err_free_rx_1; - if (tp->mac_version > RTL_GIGA_MAC_VER_06) { - u32 data = mc_filter[0]; + INIT_WORK(&tp->wk.work, rtl_task); - mc_filter[0] = swab32(mc_filter[1]); - mc_filter[1] = swab32(data); - } + smp_mb(); - RTL_W32(MAR0 + 4, mc_filter[1]); - RTL_W32(MAR0 + 0, mc_filter[0]); + rtl_request_firmware(tp); - RTL_W32(RxConfig, tmp); + retval = request_irq(pdev->irq, rtl8169_interrupt, + (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED, + dev->name, dev); + if (retval < 0) + goto err_release_fw_2; + + rtl_lock_work(tp); + + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + + napi_enable(&tp->napi); - spin_unlock_irqrestore(&tp->lock, flags); + rtl8169_init_phy(dev, tp); + + __rtl8169_set_features(dev, dev->features); + + rtl_pll_power_up(tp); + + rtl_hw_start(dev); + + netif_start_queue(dev); + + rtl_unlock_work(tp); + + tp->saved_wolopts = 0; + pm_runtime_put_noidle(&pdev->dev); + + rtl8169_check_link_status(dev, tp, ioaddr); +out: + return retval; + +err_release_fw_2: + rtl_release_firmware(tp); + rtl8169_rx_clear(tp); +err_free_rx_1: + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; +err_free_tx_0: + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; +err_pm_runtime_put: + pm_runtime_put_noidle(&pdev->dev); + goto out; } -/** - * rtl8169_get_stats - Get rtl8169 read/write statistics - * @dev: The Ethernet Device to get statistics for - * - * Get TX/RX statistics for rtl8169 - */ -static struct net_device_stats *rtl8169_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 * +rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; - unsigned long flags; + unsigned int start; - if (netif_running(dev)) { - spin_lock_irqsave(&tp->lock, flags); + if (netif_running(dev)) rtl8169_rx_missed(dev, ioaddr); - spin_unlock_irqrestore(&tp->lock, flags); - } - return &dev->stats; + do { + start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp); + stats->rx_packets = tp->rx_stats.packets; + stats->rx_bytes = tp->rx_stats.bytes; + } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start)); + + + do { + start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp); + stats->tx_packets = tp->tx_stats.packets; + stats->tx_bytes = tp->tx_stats.bytes; + } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start)); + + stats->rx_dropped = dev->stats.rx_dropped; + stats->tx_dropped = dev->stats.tx_dropped; + stats->rx_length_errors = dev->stats.rx_length_errors; + stats->rx_errors = dev->stats.rx_errors; + stats->rx_crc_errors = dev->stats.rx_crc_errors; + stats->rx_fifo_errors = dev->stats.rx_fifo_errors; + stats->rx_missed_errors = dev->stats.rx_missed_errors; + + return stats; } static void rtl8169_net_suspend(struct net_device *dev) @@ -6050,10 +5779,15 @@ static void rtl8169_net_suspend(struct net_device *dev) if (!netif_running(dev)) return; - rtl_pll_power_down(tp); - netif_device_detach(dev); 
netif_stop_queue(dev); + + rtl_lock_work(tp); + napi_disable(&tp->napi); + clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_unlock_work(tp); + + rtl_pll_power_down(tp); } #ifdef CONFIG_PM @@ -6076,7 +5810,12 @@ static void __rtl8169_resume(struct net_device *dev) rtl_pll_power_up(tp); - rtl8169_schedule_work(dev, rtl8169_reset_task); + rtl_lock_work(tp); + napi_enable(&tp->napi); + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_unlock_work(tp); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); } static int rtl8169_resume(struct device *device) @@ -6102,10 +5841,10 @@ static int rtl8169_runtime_suspend(struct device *device) if (!tp->TxDescArray) return 0; - spin_lock_irq(&tp->lock); + rtl_lock_work(tp); tp->saved_wolopts = __rtl8169_get_wol(tp); __rtl8169_set_wol(tp, WAKE_ANY); - spin_unlock_irq(&tp->lock); + rtl_unlock_work(tp); rtl8169_net_suspend(dev); @@ -6121,10 +5860,10 @@ static int rtl8169_runtime_resume(struct device *device) if (!tp->TxDescArray) return 0; - spin_lock_irq(&tp->lock); + rtl_lock_work(tp); __rtl8169_set_wol(tp, tp->saved_wolopts); tp->saved_wolopts = 0; - spin_unlock_irq(&tp->lock); + rtl_unlock_work(tp); rtl8169_init_phy(dev, tp); @@ -6186,18 +5925,17 @@ static void rtl_shutdown(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = &pdev->dev; + + pm_runtime_get_sync(d); rtl8169_net_suspend(dev); /* Restore original MAC address */ rtl_rar_set(tp, dev->perm_addr); - spin_lock_irq(&tp->lock); - rtl8169_hw_reset(tp); - spin_unlock_irq(&tp->lock); - if (system_state == SYSTEM_POWER_OFF) { if (__rtl8169_get_wol(tp) & WAKE_ANY) { rtl_wol_suspend_quirk(tp); @@ -6207,13 +5945,362 @@ static void rtl_shutdown(struct pci_dev *pdev) pci_wake_from_d3(pdev, true); pci_set_power_state(pdev, PCI_D3hot); } + + pm_runtime_put_noidle(d); +} + +static void __devexit rtl_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) { + rtl8168_driver_stop(tp); + } + + cancel_work_sync(&tp->wk.work); + + unregister_netdev(dev); + + rtl_release_firmware(tp); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + /* restore original MAC address */ + rtl_rar_set(tp, dev->perm_addr); + + rtl_disable_msi(pdev, tp); + rtl8169_release_board(pdev, dev, tp->mmio_addr); + pci_set_drvdata(pdev, NULL); +} + +static const struct net_device_ops rtl_netdev_ops = { + .ndo_open = rtl_open, + .ndo_stop = rtl8169_close, + .ndo_get_stats64 = rtl8169_get_stats64, + .ndo_start_xmit = rtl8169_start_xmit, + .ndo_tx_timeout = rtl8169_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = rtl8169_change_mtu, + .ndo_fix_features = rtl8169_fix_features, + .ndo_set_features = rtl8169_set_features, + .ndo_set_mac_address = rtl_set_mac_address, + .ndo_do_ioctl = rtl8169_ioctl, + .ndo_set_rx_mode = rtl_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8169_netpoll, +#endif + +}; + +static const struct rtl_cfg_info { + void (*hw_start)(struct net_device *); + unsigned int region; + unsigned int align; + u16 event_slow; + unsigned features; + u8 default_ver; +} rtl_cfg_infos [] = { + [RTL_CFG_0] = { + .hw_start = rtl_hw_start_8169, + .region = 1, + .align = 0, + .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver, + .features = RTL_FEATURE_GMII, 
+ .default_ver = RTL_GIGA_MAC_VER_01, + }, + [RTL_CFG_1] = { + .hw_start = rtl_hw_start_8168, + .region = 2, + .align = 8, + .event_slow = SYSErr | LinkChg | RxOverflow, + .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, + .default_ver = RTL_GIGA_MAC_VER_11, + }, + [RTL_CFG_2] = { + .hw_start = rtl_hw_start_8101, + .region = 2, + .align = 8, + .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver | + PCSTimeout, + .features = RTL_FEATURE_MSI, + .default_ver = RTL_GIGA_MAC_VER_13, + } +}; + +/* Cfg9346_Unlock assumed. */ +static unsigned rtl_try_msi(struct rtl8169_private *tp, + const struct rtl_cfg_info *cfg) +{ + void __iomem *ioaddr = tp->mmio_addr; + unsigned msi = 0; + u8 cfg2; + + cfg2 = RTL_R8(Config2) & ~MSIEnable; + if (cfg->features & RTL_FEATURE_MSI) { + if (pci_enable_msi(tp->pci_dev)) { + netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n"); + } else { + cfg2 |= MSIEnable; + msi = RTL_FEATURE_MSI; + } + } + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + RTL_W8(Config2, cfg2); + return msi; +} + +static int __devinit +rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; + const unsigned int region = cfg->region; + struct rtl8169_private *tp; + struct mii_if_info *mii; + struct net_device *dev; + void __iomem *ioaddr; + int chipset, i; + int rc; + + if (netif_msg_drv(&debug)) { + printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", + MODULENAME, RTL8169_VERSION); + } + + dev = alloc_etherdev(sizeof (*tp)); + if (!dev) { + rc = -ENOMEM; + goto out; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &rtl_netdev_ops; + tp = netdev_priv(dev); + tp->dev = dev; + tp->pci_dev = pdev; + tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); + + mii = &tp->mii; + mii->dev = dev; + mii->mdio_read = rtl_mdio_read; + mii->mdio_write = rtl_mdio_write; + mii->phy_id_mask = 0x1f; + mii->reg_num_mask = 0x1f; + mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); + + /* disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ + rc = pci_enable_device(pdev); + if (rc < 0) { + netif_err(tp, probe, dev, "enable failure\n"); + goto err_out_free_dev_1; + } + + if (pci_set_mwi(pdev) < 0) + netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n"); + + /* make sure PCI base addr 1 is MMIO */ + if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { + netif_err(tp, probe, dev, + "region #%d not an MMIO resource, aborting\n", + region); + rc = -ENODEV; + goto err_out_mwi_2; + } + + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { + netif_err(tp, probe, dev, + "Invalid PCI region size(s), aborting\n"); + rc = -ENODEV; + goto err_out_mwi_2; + } + + rc = pci_request_regions(pdev, MODULENAME); + if (rc < 0) { + netif_err(tp, probe, dev, "could not request regions\n"); + goto err_out_mwi_2; + } + + tp->cp_cmd = RxChkSum; + + if ((sizeof(dma_addr_t) > 4) && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { + tp->cp_cmd |= PCIDAC; + dev->features |= NETIF_F_HIGHDMA; + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc < 0) { + netif_err(tp, probe, dev, "DMA configuration failed\n"); + goto err_out_free_res_3; + } + } + + /* ioremap MMIO region */ + ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); + if (!ioaddr) { + netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); + rc = -EIO; + goto err_out_free_res_3; + } + tp->mmio_addr = ioaddr; + + if (!pci_is_pcie(pdev)) + netif_info(tp, probe, dev, "not PCI Express\n"); + + /* Identify chip attached to board */ + rtl8169_get_mac_version(tp, dev, cfg->default_ver); + + rtl_init_rxcfg(tp); + + rtl_irq_disable(tp); + + rtl_hw_reset(tp); + + rtl_ack_events(tp, 0xffff); + + pci_set_master(pdev); + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. 
+ */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + tp->cp_cmd |= RxVlan; + + rtl_init_mdio_ops(tp); + rtl_init_pll_power_ops(tp); + rtl_init_jumbo_ops(tp); + + rtl8169_print_mac_version(tp); + + chipset = tp->mac_version; + tp->txd_version = rtl_chip_infos[chipset].txd_version; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(Config1, RTL_R8(Config1) | PMEnable); + RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); + if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) + tp->features |= RTL_FEATURE_WOL; + if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) + tp->features |= RTL_FEATURE_WOL; + tp->features |= rtl_try_msi(tp, cfg); + RTL_W8(Cfg9346, Cfg9346_Lock); + + if (rtl_tbi_enabled(tp)) { + tp->set_speed = rtl8169_set_speed_tbi; + tp->get_settings = rtl8169_gset_tbi; + tp->phy_reset_enable = rtl8169_tbi_reset_enable; + tp->phy_reset_pending = rtl8169_tbi_reset_pending; + tp->link_ok = rtl8169_tbi_link_ok; + tp->do_ioctl = rtl_tbi_ioctl; + } else { + tp->set_speed = rtl8169_set_speed_xmii; + tp->get_settings = rtl8169_gset_xmii; + tp->phy_reset_enable = rtl8169_xmii_reset_enable; + tp->phy_reset_pending = rtl8169_xmii_reset_pending; + tp->link_ok = rtl8169_xmii_link_ok; + tp->do_ioctl = rtl_xmii_ioctl; + } + + mutex_init(&tp->wk.mutex); + + /* Get MAC address */ + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = RTL_R8(MAC0 + i); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + + SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops); + dev->watchdog_timeo = RTL8169_TX_TIMEOUT; + + netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); + + /* don't enable SG, IP_CSUM and TSO by default - it might not work + * properly for all devices */ + dev->features |= NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_HIGHDMA; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + /* 8110SCd requires hardware Rx VLAN - disallow toggling */ + dev->hw_features &= ~NETIF_F_HW_VLAN_RX; + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + tp->hw_start = cfg->hw_start; + tp->event_slow = cfg->event_slow; + + tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? + ~(RxBOVF | RxFOVF) : ~0; + + init_timer(&tp->timer); + tp->timer.data = (unsigned long) dev; + tp->timer.function = rtl8169_phy_timer; + + tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; + + rc = register_netdev(dev); + if (rc < 0) + goto err_out_msi_4; + + pci_set_drvdata(pdev, dev); + + netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n", + rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr, + (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq); + if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) { + netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, " + "tx checksumming: %s]\n", + rtl_chip_infos[chipset].jumbo_max, + rtl_chip_infos[chipset].jumbo_tx_csum ? 
"ok" : "ko"); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) { + rtl8168_driver_start(tp); + } + + device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_noidle(&pdev->dev); + + netif_carrier_off(dev); + +out: + return rc; + +err_out_msi_4: + rtl_disable_msi(pdev, tp); + iounmap(ioaddr); +err_out_free_res_3: + pci_release_regions(pdev); +err_out_mwi_2: + pci_clear_mwi(pdev); + pci_disable_device(pdev); +err_out_free_dev_1: + free_netdev(dev); + goto out; } static struct pci_driver rtl8169_pci_driver = { .name = MODULENAME, .id_table = rtl8169_pci_tbl, - .probe = rtl8169_init_one, - .remove = __devexit_p(rtl8169_remove_one), + .probe = rtl_init_one, + .remove = __devexit_p(rtl_remove_one), .shutdown = rtl_shutdown, .driver.pm = RTL8169_PM_OPS, }; |