Diffstat (limited to 'drivers/net/stmmac/stmmac_main.c')
-rw-r--r--	drivers/net/stmmac/stmmac_main.c	135
1 file changed, 51 insertions, 84 deletions
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 0e5f03135b50..e15c4a0bb96d 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -116,9 +116,6 @@ static int tc = TC_DEFAULT;
 module_param(tc, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(tc, "DMA threshold control value");
 
-#define RX_NO_COALESCE	1	/* Always interrupt on completion */
-#define TX_NO_COALESCE	-1	/* No moderation by default */
-
 /* Pay attention to tune this parameter; take care of both
  * hardware capability and network stabitily/performance impact.
  * Many tests showed that ~4ms latency seems to be good enough. */
@@ -139,7 +136,6 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
 
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
-static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev);
 
 /**
  * stmmac_verify_args - verify the driver parameters.
@@ -750,7 +746,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 			priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
 			priv->xstats.threshold = tc;
 		}
-		stmmac_tx_err(priv);
 	} else if (unlikely(status == tx_hard_error))
 		stmmac_tx_err(priv);
 }
@@ -781,21 +776,6 @@ static int stmmac_open(struct net_device *dev)
 
 	stmmac_verify_args();
 
-	ret = stmmac_init_phy(dev);
-	if (unlikely(ret)) {
-		pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
-		return ret;
-	}
-
-	/* Request the IRQ lines */
-	ret = request_irq(dev->irq, stmmac_interrupt,
-			  IRQF_SHARED, dev->name, dev);
-	if (unlikely(ret < 0)) {
-		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
-		       __func__, dev->irq, ret);
-		return ret;
-	}
-
 #ifdef CONFIG_STMMAC_TIMER
 	priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
 	if (unlikely(priv->tm == NULL)) {
@@ -814,6 +794,11 @@
 	} else
 		priv->tm->enable = 1;
 #endif
+	ret = stmmac_init_phy(dev);
+	if (unlikely(ret)) {
+		pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
+		goto open_error;
+	}
 
 	/* Create and initialize the TX/RX descriptors chains. */
 	priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
@@ -822,12 +807,11 @@
 	init_dma_desc_rings(dev);
 
 	/* DMA initialization and SW reset */
-	if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
-					 priv->dma_tx_phy,
-					 priv->dma_rx_phy) < 0)) {
-
+	ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
+				  priv->dma_tx_phy, priv->dma_rx_phy);
+	if (ret < 0) {
 		pr_err("%s: DMA initialization failed\n", __func__);
-		return -1;
+		goto open_error;
 	}
 
 	/* Copy the MAC addr into the HW */
@@ -843,11 +827,21 @@
 		pr_info("stmmac: Rx Checksum Offload Engine supported\n");
 	if (priv->plat->tx_coe)
 		pr_info("\tTX Checksum insertion supported\n");
+	netdev_update_features(dev);
 
 	/* Initialise the MMC (if present) to disable all interrupts. */
 	writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
 	writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
 
+	/* Request the IRQ lines */
+	ret = request_irq(dev->irq, stmmac_interrupt,
+			  IRQF_SHARED, dev->name, dev);
+	if (unlikely(ret < 0)) {
+		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+		       __func__, dev->irq, ret);
+		goto open_error;
+	}
+
 	/* Enable the MAC Rx/Tx */
 	stmmac_enable_mac(priv->ioaddr);
 
@@ -878,7 +872,17 @@ static int stmmac_open(struct net_device *dev)
 	napi_enable(&priv->napi);
 	skb_queue_head_init(&priv->rx_recycle);
 	netif_start_queue(dev);
+
 	return 0;
+
+open_error:
+#ifdef CONFIG_STMMAC_TIMER
+	kfree(priv->tm);
+#endif
+	if (priv->phydev)
+		phy_disconnect(priv->phydev);
+
+	return ret;
 }
 
 /**
@@ -927,46 +931,6 @@ static int stmmac_release(struct net_device *dev)
 	return 0;
 }
 
-/*
- * To perform emulated hardware segmentation on skb.
- */
-static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
-{
-	struct sk_buff *segs, *curr_skb;
-	int gso_segs = skb_shinfo(skb)->gso_segs;
-
-	/* Estimate the number of fragments in the worst case */
-	if (unlikely(stmmac_tx_avail(priv) < gso_segs)) {
-		netif_stop_queue(priv->dev);
-		TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n",
-		       __func__);
-		if (stmmac_tx_avail(priv) < gso_segs)
-			return NETDEV_TX_BUSY;
-
-		netif_wake_queue(priv->dev);
-	}
-	TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n",
-	       skb, skb->len);
-
-	segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
-	if (IS_ERR(segs))
-		goto sw_tso_end;
-
-	do {
-		curr_skb = segs;
-		segs = segs->next;
-		TX_DBG("\t\tcurrent skb->len: %d, *curr %p,"
-		       "*next %p\n", curr_skb->len, curr_skb, segs);
-		curr_skb->next = NULL;
-		stmmac_xmit(curr_skb, priv->dev);
-	} while (segs);
-
-sw_tso_end:
-	dev_kfree_skb(skb);
-
-	return NETDEV_TX_OK;
-}
-
 static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
 					       struct net_device *dev,
 					       int csum_insertion)
@@ -1044,16 +1008,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		!skb_is_gso(skb) ? "isn't" : "is");
 #endif
 
-	if (unlikely(skb_is_gso(skb)))
-		return stmmac_sw_tso(priv, skb);
-
-	if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
-		if (unlikely((!priv->plat->tx_coe) ||
-			     (priv->no_csum_insertion)))
-			skb_checksum_help(skb);
-		else
-			csum_insertion = 1;
-	}
+	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
 
 	desc = priv->dma_tx + entry;
 	first = desc;
@@ -1373,18 +1328,29 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
 		return -EINVAL;
 	}
 
+	dev->mtu = new_mtu;
+	netdev_update_features(dev);
+
+	return 0;
+}
+
+static u32 stmmac_fix_features(struct net_device *dev, u32 features)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	if (!priv->rx_coe)
+		features &= ~NETIF_F_RXCSUM;
+	if (!priv->plat->tx_coe)
+		features &= ~NETIF_F_ALL_CSUM;
+
 	/* Some GMAC devices have a bugged Jumbo frame support that
 	 * needs to have the Tx COE disabled for oversized frames
 	 * (due to limited buffer sizes). In this case we disable
 	 * the TX csum insertionin the TDES and not use SF. */
-	if ((priv->plat->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
-		priv->no_csum_insertion = 1;
-	else
-		priv->no_csum_insertion = 0;
+	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
+		features &= ~NETIF_F_ALL_CSUM;
 
-	dev->mtu = new_mtu;
-
-	return 0;
+	return features;
 }
 
 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
@@ -1464,6 +1430,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
 	.ndo_start_xmit = stmmac_xmit,
 	.ndo_stop = stmmac_release,
 	.ndo_change_mtu = stmmac_change_mtu,
+	.ndo_fix_features = stmmac_fix_features,
 	.ndo_set_multicast_list = stmmac_multicast_list,
 	.ndo_tx_timeout = stmmac_tx_timeout,
 	.ndo_do_ioctl = stmmac_ioctl,
@@ -1494,8 +1461,8 @@ static int stmmac_probe(struct net_device *dev)
 	dev->netdev_ops = &stmmac_netdev_ops;
 	stmmac_set_ethtool_ops(dev);
 
-	dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA |
-		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
 	dev->watchdog_timeo = msecs_to_jiffies(watchdog);
 #ifdef STMMAC_VLAN_TAG_USED
 	/* Both mac100 and gmac support receive VLAN tag detection */
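For readers less familiar with the features API this patch adopts: the driver now advertises its togglable offloads in dev->hw_features once at probe time, and ndo_fix_features becomes the single place where unsupportable bits are masked out; the core re-runs that hook on every netdev_update_features() call. Below is a minimal sketch of the pattern using the 2.6.39-era u32 feature masks. The foo_* driver and its priv fields are hypothetical, invented for illustration; only the NETIF_F_* flags, the hook signature, and the netdev_update_features() call are taken from the patch above.

	/* Sketch only, not part of the patch: a hypothetical foo driver
	 * wired up the same way stmmac is converted above. */
	static u32 foo_fix_features(struct net_device *dev, u32 features)
	{
		struct foo_priv *priv = netdev_priv(dev);

		if (!priv->rx_coe)		/* no RX checksum engine */
			features &= ~NETIF_F_RXCSUM;
		if (!priv->tx_coe || dev->mtu > ETH_DATA_LEN)
			features &= ~NETIF_F_ALL_CSUM;	/* TX COE absent or unusable */

		return features;
	}

	static int foo_change_mtu(struct net_device *dev, int new_mtu)
	{
		dev->mtu = new_mtu;
		netdev_update_features(dev);	/* re-apply the masks above */
		return 0;
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_change_mtu = foo_change_mtu,
		.ndo_fix_features = foo_fix_features,
	};

	static int foo_probe(struct net_device *dev)
	{
		dev->netdev_ops = &foo_netdev_ops;
		/* Advertise what userspace may toggle via ethtool. */
		dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		dev->features |= dev->hw_features;
		return 0;
	}

This is also why the stmmac_xmit hunk can drop the per-packet skb_checksum_help() fallback: once the checksum bits are masked out of dev->features, the stack resolves CHECKSUM_PARTIAL in software before the skb ever reaches the driver, so testing skb->ip_summed is sufficient there.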