Diffstat (limited to 'drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c')
-rw-r--r-- | drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 213
1 file changed, 54 insertions, 159 deletions
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index cfaf17b70f3f..c92c3b7876ca 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -215,139 +215,23 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
         return received;
 }
 
-/* non NAPI receive function */
-static int fs_enet_rx_non_napi(struct net_device *dev)
+static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
 {
-        struct fs_enet_private *fep = netdev_priv(dev);
-        const struct fs_platform_info *fpi = fep->fpi;
-        cbd_t __iomem *bdp;
-        struct sk_buff *skb, *skbn, *skbt;
-        int received = 0;
-        u16 pkt_len, sc;
-        int curidx;
-        /*
-         * First, grab all of the stats for the incoming packet.
-         * These get messed up if we get called due to a busy condition.
-         */
-        bdp = fep->cur_rx;
-
-        while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
-
-                curidx = bdp - fep->rx_bd_base;
-
-                /*
-                 * Since we have allocated space to hold a complete frame,
-                 * the last indicator should be set.
-                 */
-                if ((sc & BD_ENET_RX_LAST) == 0)
-                        dev_warn(fep->dev, "rcv is not +last\n");
-
-                /*
-                 * Check for errors.
-                 */
-                if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
-                          BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
-                        fep->stats.rx_errors++;
-                        /* Frame too long or too short. */
-                        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
-                                fep->stats.rx_length_errors++;
-                        /* Frame alignment */
-                        if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
-                                fep->stats.rx_frame_errors++;
-                        /* CRC Error */
-                        if (sc & BD_ENET_RX_CR)
-                                fep->stats.rx_crc_errors++;
-                        /* FIFO overrun */
-                        if (sc & BD_ENET_RX_OV)
-                                fep->stats.rx_crc_errors++;
-
-                        skb = fep->rx_skbuff[curidx];
-
-                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
-                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
-                                DMA_FROM_DEVICE);
-
-                        skbn = skb;
-
-                } else {
-
-                        skb = fep->rx_skbuff[curidx];
-
-                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
-                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
-                                DMA_FROM_DEVICE);
-
-                        /*
-                         * Process the incoming frame.
-                         */
-                        fep->stats.rx_packets++;
-                        pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
-                        fep->stats.rx_bytes += pkt_len + 4;
-
-                        if (pkt_len <= fpi->rx_copybreak) {
-                                /* +2 to make IP header L1 cache aligned */
-                                skbn = netdev_alloc_skb(dev, pkt_len + 2);
-                                if (skbn != NULL) {
-                                        skb_reserve(skbn, 2);   /* align IP header */
-                                        skb_copy_from_linear_data(skb,
-                                                skbn->data, pkt_len);
-                                        /* swap */
-                                        skbt = skb;
-                                        skb = skbn;
-                                        skbn = skbt;
-                                }
-                        } else {
-                                skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
-
-                                if (skbn)
-                                        skb_align(skbn, ENET_RX_ALIGN);
-                        }
-
-                        if (skbn != NULL) {
-                                skb_put(skb, pkt_len);  /* Make room */
-                                skb->protocol = eth_type_trans(skb, dev);
-                                received++;
-                                netif_rx(skb);
-                        } else {
-                                fep->stats.rx_dropped++;
-                                skbn = skb;
-                        }
-                }
-
-                fep->rx_skbuff[curidx] = skbn;
-                CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
-                        L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
-                        DMA_FROM_DEVICE));
-                CBDW_DATLEN(bdp, 0);
-                CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
-
-                /*
-                 * Update BD pointer to next entry.
-                 */
-                if ((sc & BD_ENET_RX_WRAP) == 0)
-                        bdp++;
-                else
-                        bdp = fep->rx_bd_base;
-
-                (*fep->ops->rx_bd_done)(dev);
-        }
-
-        fep->cur_rx = bdp;
-
-        return 0;
-}
-
-static void fs_enet_tx(struct net_device *dev)
-{
-        struct fs_enet_private *fep = netdev_priv(dev);
+        struct fs_enet_private *fep = container_of(napi, struct fs_enet_private,
+                                                   napi_tx);
+        struct net_device *dev = fep->ndev;
         cbd_t __iomem *bdp;
         struct sk_buff *skb;
         int dirtyidx, do_wake, do_restart;
         u16 sc;
+        int has_tx_work = 0;
 
         spin_lock(&fep->tx_lock);
         bdp = fep->dirty_tx;
 
+        /* clear TX status bits for napi*/
+        (*fep->ops->napi_clear_tx_event)(dev);
+
         do_wake = do_restart = 0;
         while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
                 dirtyidx = bdp - fep->tx_bd_base;
@@ -400,7 +284,7 @@ static void fs_enet_tx(struct net_device *dev)
                 /*
                  * Free the sk buffer associated with this last transmit.
                  */
-                dev_kfree_skb_irq(skb);
+                dev_kfree_skb(skb);
                 fep->tx_skbuff[dirtyidx] = NULL;
 
                 /*
@@ -417,6 +301,7 @@ static void fs_enet_tx(struct net_device *dev)
                  */
                 if (!fep->tx_free++)
                         do_wake = 1;
+                has_tx_work = 1;
         }
 
         fep->dirty_tx = bdp;
@@ -424,10 +309,19 @@ static void fs_enet_tx(struct net_device *dev)
         if (do_restart)
                 (*fep->ops->tx_restart)(dev);
 
+        if (!has_tx_work) {
+                napi_complete(napi);
+                (*fep->ops->napi_enable_tx)(dev);
+        }
+
         spin_unlock(&fep->tx_lock);
 
         if (do_wake)
                 netif_wake_queue(dev);
+
+        if (has_tx_work)
+                return budget;
+        return 0;
 }
 
 /*
@@ -453,8 +347,7 @@ fs_enet_interrupt(int irq, void *dev_id)
                 nr++;
 
                 int_clr_events = int_events;
-                if (fpi->use_napi)
-                        int_clr_events &= ~fep->ev_napi_rx;
+                int_clr_events &= ~fep->ev_napi_rx;
 
                 (*fep->ops->clear_int_events)(dev, int_clr_events);
 
@@ -462,23 +355,28 @@ fs_enet_interrupt(int irq, void *dev_id)
                         (*fep->ops->ev_error)(dev, int_events);
 
                 if (int_events & fep->ev_rx) {
-                        if (!fpi->use_napi)
-                                fs_enet_rx_non_napi(dev);
-                        else {
-                                napi_ok = napi_schedule_prep(&fep->napi);
-
-                                (*fep->ops->napi_disable_rx)(dev);
-                                (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
-
-                                /* NOTE: it is possible for FCCs in NAPI mode */
-                                /* to submit a spurious interrupt while in poll */
-                                if (napi_ok)
-                                        __napi_schedule(&fep->napi);
-                        }
+                        napi_ok = napi_schedule_prep(&fep->napi);
+
+                        (*fep->ops->napi_disable_rx)(dev);
+                        (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
+
+                        /* NOTE: it is possible for FCCs in NAPI mode */
+                        /* to submit a spurious interrupt while in poll */
+                        if (napi_ok)
+                                __napi_schedule(&fep->napi);
                 }
 
-                if (int_events & fep->ev_tx)
-                        fs_enet_tx(dev);
+                if (int_events & fep->ev_tx) {
+                        napi_ok = napi_schedule_prep(&fep->napi_tx);
+
+                        (*fep->ops->napi_disable_tx)(dev);
+                        (*fep->ops->clear_int_events)(dev, fep->ev_napi_tx);
+
+                        /* NOTE: it is possible for FCCs in NAPI mode */
+                        /* to submit a spurious interrupt while in poll */
+                        if (napi_ok)
+                                __napi_schedule(&fep->napi_tx);
+                }
         }
 
         handled = nr > 0;
@@ -611,7 +509,6 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
         cbd_t __iomem *bdp;
         int curidx;
         u16 sc;
-        unsigned long flags;
 
 #ifdef CONFIG_FS_ENET_MPC5121_FEC
         if (((unsigned long)skb->data) & 0x3) {
@@ -626,7 +523,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 }
         }
 #endif
-        spin_lock_irqsave(&fep->tx_lock, flags);
+        spin_lock(&fep->tx_lock);
 
         /*
          * Fill in a Tx ring entry
@@ -635,7 +532,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
         if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
                 netif_stop_queue(dev);
-                spin_unlock_irqrestore(&fep->tx_lock, flags);
+                spin_unlock(&fep->tx_lock);
 
                 /*
                  * Ooops.  All transmit buffers are full.  Bail out.
@@ -691,7 +588,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
         (*fep->ops->tx_kickstart)(dev);
 
-        spin_unlock_irqrestore(&fep->tx_lock, flags);
+        spin_unlock(&fep->tx_lock);
 
         return NETDEV_TX_OK;
 }
@@ -811,24 +708,24 @@ static int fs_enet_open(struct net_device *dev)
         /* not doing this, will cause a crash in fs_enet_rx_napi */
         fs_init_bds(fep->ndev);
 
-        if (fep->fpi->use_napi)
-                napi_enable(&fep->napi);
+        napi_enable(&fep->napi);
+        napi_enable(&fep->napi_tx);
 
         /* Install our interrupt handler. */
         r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
                         "fs_enet-mac", dev);
         if (r != 0) {
                 dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
-                if (fep->fpi->use_napi)
-                        napi_disable(&fep->napi);
+                napi_disable(&fep->napi);
+                napi_disable(&fep->napi_tx);
                 return -EINVAL;
         }
 
         err = fs_init_phy(dev);
         if (err) {
                 free_irq(fep->interrupt, dev);
-                if (fep->fpi->use_napi)
-                        napi_disable(&fep->napi);
+                napi_disable(&fep->napi);
+                napi_disable(&fep->napi_tx);
                 return err;
         }
         phy_start(fep->phydev);
@@ -845,8 +742,8 @@ static int fs_enet_close(struct net_device *dev)
 
         netif_stop_queue(dev);
         netif_carrier_off(dev);
-        if (fep->fpi->use_napi)
-                napi_disable(&fep->napi);
+        napi_disable(&fep->napi);
+        napi_disable(&fep->napi_tx);
         phy_stop(fep->phydev);
 
         spin_lock_irqsave(&fep->lock, flags);
@@ -1022,7 +919,6 @@ static int fs_enet_probe(struct platform_device *ofdev)
         fpi->rx_ring = 32;
         fpi->tx_ring = 32;
         fpi->rx_copybreak = 240;
-        fpi->use_napi = 1;
         fpi->napi_weight = 17;
         fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
         if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
@@ -1033,7 +929,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
                 /* In the case of a fixed PHY, the DT node associated
                  * to the PHY is the Ethernet MAC DT node.
                  */
-                fpi->phy_node = ofdev->dev.of_node;
+                fpi->phy_node = of_node_get(ofdev->dev.of_node);
         }
 
         if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
@@ -1102,9 +998,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
 
         ndev->netdev_ops = &fs_enet_netdev_ops;
         ndev->watchdog_timeo = 2 * HZ;
-        if (fpi->use_napi)
-                netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
-                               fpi->napi_weight);
+        netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight);
+        netif_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2);
 
         ndev->ethtool_ops = &fs_ethtool_ops;
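
For readers skimming the patch, the core of the change is the usual NAPI hand-off applied to TX completion: the hard interrupt only masks and acknowledges the TX event and schedules a poll, and descriptor reclaim moves into the softirq poll routine, which is consistent with dev_kfree_skb_irq() becoming dev_kfree_skb() and the irqsave variants of the tx_lock going away in the xmit path. The sketch below is illustrative only, not the driver's code; the my_* helpers are hypothetical stand-ins for the fep->ops callbacks used above.

/*
 * Illustrative sketch only (not fs_enet code): the interrupt/poll
 * hand-off pattern the patch adopts for TX completion.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_priv {
        struct net_device *ndev;
        struct napi_struct napi_tx;
};

/* Hypothetical hardware helpers standing in for the driver's ops. */
static void my_mask_tx_irq(struct net_device *dev)   { /* mask TX-complete IRQ */ }
static void my_ack_tx_irq(struct net_device *dev)    { /* clear pending TX event */ }
static void my_unmask_tx_irq(struct net_device *dev) { /* re-enable TX-complete IRQ */ }
static int my_reclaim_tx_ring(struct net_device *dev)
{
        return 0;       /* would return how many descriptors were freed */
}

/* IRQ side: mask and ack the TX event, then defer the work to NAPI. */
static irqreturn_t my_isr(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct my_priv *priv = netdev_priv(dev);

        if (napi_schedule_prep(&priv->napi_tx)) {
                my_mask_tx_irq(dev);
                my_ack_tx_irq(dev);
                __napi_schedule(&priv->napi_tx);
        }
        return IRQ_HANDLED;
}

/*
 * Poll side: reclaim completed descriptors in softirq context; when
 * nothing was cleaned, complete NAPI and unmask the TX interrupt.
 */
static int my_tx_poll(struct napi_struct *napi, int budget)
{
        struct my_priv *priv = container_of(napi, struct my_priv, napi_tx);

        if (!my_reclaim_tx_ring(priv->ndev)) {
                napi_complete(napi);
                my_unmask_tx_irq(priv->ndev);
                return 0;               /* done: interrupts take over again */
        }
        return budget;                  /* more work: stay on the poll list */
}

/* Registered at probe time with a small weight, as the patch does. */
static void my_register_tx_napi(struct net_device *dev, struct my_priv *priv)
{
        netif_napi_add(dev, &priv->napi_tx, my_tx_poll, 2);
}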