Diffstat (limited to 'drivers/net/ethernet/xilinx/ll_temac_main.c')
-rw-r--r--	drivers/net/ethernet/xilinx/ll_temac_main.c	370
1 file changed, 293 insertions(+), 77 deletions(-)
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 6f11f52c9a9e..3e313e71ae36 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -51,14 +51,18 @@
 #include <linux/ip.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
+#include <linux/workqueue.h>
 #include <linux/dma-mapping.h>
 #include <linux/processor.h>
 #include <linux/platform_data/xilinx-ll-temac.h>
 
 #include "ll_temac.h"
 
-#define TX_BD_NUM   64
-#define RX_BD_NUM   128
+/* Descriptors defines for Tx and Rx DMA */
+#define TX_BD_NUM_DEFAULT   64
+#define RX_BD_NUM_DEFAULT   1024
+#define TX_BD_NUM_MAX       4096
+#define RX_BD_NUM_MAX       4096
 
 /* ---------------------------------------------------------------------
  * Low level register access functions
@@ -300,7 +304,7 @@ static void temac_dma_bd_release(struct net_device *ndev)
 	/* Reset Local Link (DMA) */
 	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
 
-	for (i = 0; i < RX_BD_NUM; i++) {
+	for (i = 0; i < lp->rx_bd_num; i++) {
 		if (!lp->rx_skb[i])
 			break;
 		else {
@@ -311,12 +315,12 @@ static void temac_dma_bd_release(struct net_device *ndev)
 	}
 	if (lp->rx_bd_v)
 		dma_free_coherent(ndev->dev.parent,
-				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
-				  lp->rx_bd_v, lp->rx_bd_p);
+				  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+				  lp->rx_bd_v, lp->rx_bd_p);
 	if (lp->tx_bd_v)
 		dma_free_coherent(ndev->dev.parent,
-				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
-				  lp->tx_bd_v, lp->tx_bd_p);
+				  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+				  lp->tx_bd_v, lp->tx_bd_p);
 }
 
 /**
@@ -329,33 +333,33 @@ static int temac_dma_bd_init(struct net_device *ndev)
 	dma_addr_t skb_dma_addr;
 	int i;
 
-	lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb),
-				  GFP_KERNEL);
+	lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
+				  sizeof(*lp->rx_skb), GFP_KERNEL);
 	if (!lp->rx_skb)
 		goto out;
 
 	/* allocate the tx and rx ring buffer descriptors. */
 	/* returns a virtual address and a physical address. */
 	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
-					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 					 &lp->tx_bd_p, GFP_KERNEL);
 	if (!lp->tx_bd_v)
 		goto out;
 
 	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
-					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 					 &lp->rx_bd_p, GFP_KERNEL);
 	if (!lp->rx_bd_v)
 		goto out;
 
-	for (i = 0; i < TX_BD_NUM; i++) {
+	for (i = 0; i < lp->tx_bd_num; i++) {
 		lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
-				+ sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM));
+			+ sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
 	}
 
-	for (i = 0; i < RX_BD_NUM; i++) {
+	for (i = 0; i < lp->rx_bd_num; i++) {
 		lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
-				+ sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM));
+			+ sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));
 
 		skb = netdev_alloc_skb_ip_align(ndev,
 						XTE_MAX_JUMBO_FRAME_SIZE);
@@ -367,32 +371,36 @@ static int temac_dma_bd_init(struct net_device *ndev)
 		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
 					      XTE_MAX_JUMBO_FRAME_SIZE,
 					      DMA_FROM_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
+			goto out;
 		lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
 		lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
 		lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
 	}
 
 	/* Configure DMA channel (irq setup) */
-	lp->dma_out(lp, TX_CHNL_CTRL, lp->tx_chnl_ctrl |
+	lp->dma_out(lp, TX_CHNL_CTRL,
+		    lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
 		    0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
 		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN | CHNL_CTRL_IRQ_DLY_EN |
 		    CHNL_CTRL_IRQ_COAL_EN);
-	lp->dma_out(lp, RX_CHNL_CTRL, lp->rx_chnl_ctrl |
+	lp->dma_out(lp, RX_CHNL_CTRL,
+		    lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
 		    CHNL_CTRL_IRQ_IOE |
 		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN | CHNL_CTRL_IRQ_DLY_EN |
 		    CHNL_CTRL_IRQ_COAL_EN);
 
 	/* Init descriptor indexes */
 	lp->tx_bd_ci = 0;
-	lp->tx_bd_next = 0;
 	lp->tx_bd_tail = 0;
 	lp->rx_bd_ci = 0;
+	lp->rx_bd_tail = lp->rx_bd_num - 1;
 
 	/* Enable RX DMA transfers */
 	wmb();
 	lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
 	lp->dma_out(lp, RX_TAILDESC_PTR,
-		 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+		 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));
 
 	/* Prepare for TX DMA transfer */
 	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
@@ -781,13 +789,16 @@ static void temac_start_xmit_done(struct net_device *ndev)
 		ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
 
 		lp->tx_bd_ci++;
-		if (lp->tx_bd_ci >= TX_BD_NUM)
+		if (lp->tx_bd_ci >= lp->tx_bd_num)
 			lp->tx_bd_ci = 0;
 
 		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
 		stat = be32_to_cpu(cur_p->app0);
 	}
 
+	/* Matches barrier in temac_start_xmit */
+	smp_mb();
+
 	netif_wake_queue(ndev);
 }
 
@@ -804,7 +815,7 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
 			return NETDEV_TX_BUSY;
 
 		tail++;
-		if (tail >= TX_BD_NUM)
+		if (tail >= lp->tx_bd_num)
 			tail = 0;
 
 		cur_p = &lp->tx_bd_v[tail];
@@ -819,20 +830,29 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct temac_local *lp = netdev_priv(ndev);
 	struct cdmac_bd *cur_p;
-	dma_addr_t start_p, tail_p, skb_dma_addr;
+	dma_addr_t tail_p, skb_dma_addr;
 	int ii;
 	unsigned long num_frag;
 	skb_frag_t *frag;
 
 	num_frag = skb_shinfo(skb)->nr_frags;
 	frag = &skb_shinfo(skb)->frags[0];
-	start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 
 	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
-		if (!netif_queue_stopped(ndev))
-			netif_stop_queue(ndev);
-		return NETDEV_TX_BUSY;
+		if (netif_queue_stopped(ndev))
+			return NETDEV_TX_BUSY;
+
+		netif_stop_queue(ndev);
+
+		/* Matches barrier in temac_start_xmit_done */
+		smp_mb();
+
+		/* Space might have just been freed - check again */
+		if (temac_check_tx_bd_space(lp, num_frag))
+			return NETDEV_TX_BUSY;
+
+		netif_wake_queue(ndev);
 	}
 
 	cur_p->app0 = 0;
@@ -850,12 +870,16 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
 				      skb_headlen(skb), DMA_TO_DEVICE);
 	cur_p->len = cpu_to_be32(skb_headlen(skb));
+	if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
+		dev_kfree_skb_any(skb);
+		ndev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
 	cur_p->phys = cpu_to_be32(skb_dma_addr);
 	ptr_to_txbd((void *)skb, cur_p);
 
 	for (ii = 0; ii < num_frag; ii++) {
-		lp->tx_bd_tail++;
-		if (lp->tx_bd_tail >= TX_BD_NUM)
+		if (++lp->tx_bd_tail >= lp->tx_bd_num)
 			lp->tx_bd_tail = 0;
 
 		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -863,6 +887,27 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 					      skb_frag_address(frag),
 					      skb_frag_size(frag),
 					      DMA_TO_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
+			if (--lp->tx_bd_tail < 0)
+				lp->tx_bd_tail = lp->tx_bd_num - 1;
+			cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+			while (--ii >= 0) {
+				--frag;
+				dma_unmap_single(ndev->dev.parent,
+						 be32_to_cpu(cur_p->phys),
+						 skb_frag_size(frag),
+						 DMA_TO_DEVICE);
+				if (--lp->tx_bd_tail < 0)
+					lp->tx_bd_tail = lp->tx_bd_num - 1;
+				cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+			}
+			dma_unmap_single(ndev->dev.parent,
+					 be32_to_cpu(cur_p->phys),
+					 skb_headlen(skb), DMA_TO_DEVICE);
+			dev_kfree_skb_any(skb);
+			ndev->stats.tx_dropped++;
+			return NETDEV_TX_OK;
+		}
 		cur_p->phys = cpu_to_be32(skb_dma_addr);
 		cur_p->len = cpu_to_be32(skb_frag_size(frag));
 		cur_p->app0 = 0;
@@ -872,7 +917,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 	lp->tx_bd_tail++;
-	if (lp->tx_bd_tail >= TX_BD_NUM)
+	if (lp->tx_bd_tail >= lp->tx_bd_num)
 		lp->tx_bd_tail = 0;
 
 	skb_tx_timestamp(skb);
@@ -884,31 +929,56 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	return NETDEV_TX_OK;
 }
 
+static int ll_temac_recv_buffers_available(struct temac_local *lp)
+{
+	int available;
+
+	if (!lp->rx_skb[lp->rx_bd_ci])
+		return 0;
+	available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
+	if (available <= 0)
+		available += lp->rx_bd_num;
+	return available;
+}
+
 static void ll_temac_recv(struct net_device *ndev)
 {
 	struct temac_local *lp = netdev_priv(ndev);
-	struct sk_buff *skb, *new_skb;
-	unsigned int bdstat;
-	struct cdmac_bd *cur_p;
-	dma_addr_t tail_p, skb_dma_addr;
-	int length;
 	unsigned long flags;
+	int rx_bd;
+	bool update_tail = false;
 
 	spin_lock_irqsave(&lp->rx_lock, flags);
 
-	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
-	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
-
-	bdstat = be32_to_cpu(cur_p->app0);
-	while ((bdstat & STS_CTRL_APP0_CMPLT)) {
+	/* Process all received buffers, passing them on network
+	 * stack. After this, the buffer descriptors will be in an
+	 * un-allocated stage, where no skb is allocated for it, and
+	 * they are therefore not available for TEMAC/DMA.
+	 */
+	do {
+		struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
+		struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
+		unsigned int bdstat = be32_to_cpu(bd->app0);
+		int length;
+
+		/* While this should not normally happen, we can end
+		 * here when GFP_ATOMIC allocations fail, and we
+		 * therefore have un-allocated buffers.
+		 */
+		if (!skb)
+			break;
 
-		skb = lp->rx_skb[lp->rx_bd_ci];
-		length = be32_to_cpu(cur_p->app4) & 0x3FFF;
+		/* Loop over all completed buffer descriptors */
+		if (!(bdstat & STS_CTRL_APP0_CMPLT))
+			break;
 
-		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
+		dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
 				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
+		/* The buffer is not valid for DMA anymore */
+		bd->phys = 0;
+		bd->len = 0;
+
+		length = be32_to_cpu(bd->app4) & 0x3FFF;
 		skb_put(skb, length);
 		skb->protocol = eth_type_trans(skb, ndev);
 		skb_checksum_none_assert(skb);
@@ -923,43 +993,102 @@ static void ll_temac_recv(struct net_device *ndev)
 			 * (back) for proper IP checksum byte order
 			 * (be16).
 			 */
-			skb->csum = htons(be32_to_cpu(cur_p->app3) & 0xFFFF);
+			skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
 			skb->ip_summed = CHECKSUM_COMPLETE;
 		}
 
 		if (!skb_defer_rx_timestamp(skb))
 			netif_rx(skb);
+		/* The skb buffer is now owned by network stack above */
+		lp->rx_skb[lp->rx_bd_ci] = NULL;
 
 		ndev->stats.rx_packets++;
 		ndev->stats.rx_bytes += length;
 
-		new_skb = netdev_alloc_skb_ip_align(ndev,
-						    XTE_MAX_JUMBO_FRAME_SIZE);
-		if (!new_skb) {
-			spin_unlock_irqrestore(&lp->rx_lock, flags);
-			return;
+		rx_bd = lp->rx_bd_ci;
+		if (++lp->rx_bd_ci >= lp->rx_bd_num)
+			lp->rx_bd_ci = 0;
+	} while (rx_bd != lp->rx_bd_tail);
+
+	/* DMA operations will halt when the last buffer descriptor is
+	 * processed (ie. the one pointed to by RX_TAILDESC_PTR).
+	 * When that happens, no more interrupt events will be
+	 * generated. No IRQ_COAL or IRQ_DLY, and not even an
+	 * IRQ_ERR. To avoid stalling, we schedule a delayed work
+	 * when there is a potential risk of that happening. The work
+	 * will call this function, and thus re-schedule itself until
+	 * enough buffers are available again.
+	 */
+	if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
+		schedule_delayed_work(&lp->restart_work, HZ / 1000);
+
+	/* Allocate new buffers for those buffer descriptors that were
+	 * passed to network stack. Note that GFP_ATOMIC allocations
+	 * can fail (e.g. when a larger burst of GFP_ATOMIC
+	 * allocations occurs), so while we try to allocate all
+	 * buffers in the same interrupt where they were processed, we
+	 * continue with what we could get in case of allocation
+	 * failure. Allocation of remaining buffers will be retried
+	 * in following calls.
+	 */
+	while (1) {
+		struct sk_buff *skb;
+		struct cdmac_bd *bd;
+		dma_addr_t skb_dma_addr;
+
+		rx_bd = lp->rx_bd_tail + 1;
+		if (rx_bd >= lp->rx_bd_num)
+			rx_bd = 0;
+		bd = &lp->rx_bd_v[rx_bd];
+
+		if (bd->phys)
+			break;	/* All skb's allocated */
+
+		skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
+		if (!skb) {
+			dev_warn(&ndev->dev, "skb alloc failed\n");
+			break;
 		}
 
-		cur_p->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
-		skb_dma_addr = dma_map_single(ndev->dev.parent, new_skb->data,
+		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
 					      XTE_MAX_JUMBO_FRAME_SIZE,
 					      DMA_FROM_DEVICE);
-		cur_p->phys = cpu_to_be32(skb_dma_addr);
-		cur_p->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
-		lp->rx_skb[lp->rx_bd_ci] = new_skb;
+		if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
+						   skb_dma_addr))) {
+			dev_kfree_skb_any(skb);
+			break;
+		}
 
-		lp->rx_bd_ci++;
-		if (lp->rx_bd_ci >= RX_BD_NUM)
-			lp->rx_bd_ci = 0;
+		bd->phys = cpu_to_be32(skb_dma_addr);
+		bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
+		bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
+		lp->rx_skb[rx_bd] = skb;
 
-		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
-		bdstat = be32_to_cpu(cur_p->app0);
+		lp->rx_bd_tail = rx_bd;
+		update_tail = true;
+	}
+
+	/* Move tail pointer when buffers have been allocated */
+	if (update_tail) {
+		lp->dma_out(lp, RX_TAILDESC_PTR,
+			    lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
 	}
-	lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
 
 	spin_unlock_irqrestore(&lp->rx_lock, flags);
 }
 
+/* Function scheduled to ensure a restart in case of DMA halt
+ * condition caused by running out of buffer descriptors.
+ */
+static void ll_temac_restart_work_func(struct work_struct *work)
+{
+	struct temac_local *lp = container_of(work, struct temac_local,
+					      restart_work.work);
+	struct net_device *ndev = lp->ndev;
+
+	ll_temac_recv(ndev);
+}
+
 static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
 {
 	struct net_device *ndev = _ndev;
@@ -1052,6 +1181,8 @@ static int temac_stop(struct net_device *ndev)
 
 	dev_dbg(&ndev->dev, "temac_close()\n");
 
+	cancel_delayed_work_sync(&lp->restart_work);
+
 	free_irq(lp->tx_irq, ndev);
 	free_irq(lp->rx_irq, ndev);
 
@@ -1122,13 +1253,96 @@ static const struct attribute_group temac_attr_group = {
 	.attrs = temac_device_attrs,
 };
 
-/* ethtool support */
+/* ---------------------------------------------------------------------
+ * ethtool support
+ */
+
+static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
+					    struct ethtool_ringparam *ering)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+
+	ering->rx_max_pending = RX_BD_NUM_MAX;
+	ering->rx_mini_max_pending = 0;
+	ering->rx_jumbo_max_pending = 0;
+	ering->tx_max_pending = TX_BD_NUM_MAX;
+	ering->rx_pending = lp->rx_bd_num;
+	ering->rx_mini_pending = 0;
+	ering->rx_jumbo_pending = 0;
+	ering->tx_pending = lp->tx_bd_num;
+}
+
+static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
+					   struct ethtool_ringparam *ering)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+
+	if (ering->rx_pending > RX_BD_NUM_MAX ||
+	    ering->rx_mini_pending ||
+	    ering->rx_jumbo_pending ||
+	    ering->rx_pending > TX_BD_NUM_MAX)
+		return -EINVAL;
+
+	if (netif_running(ndev))
+		return -EBUSY;
+
+	lp->rx_bd_num = ering->rx_pending;
+	lp->tx_bd_num = ering->tx_pending;
+	return 0;
+}
+
+static int ll_temac_ethtools_get_coalesce(struct net_device *ndev,
+					  struct ethtool_coalesce *ec)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+
+	ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
+	ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
+	ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
+	ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
+	return 0;
+}
+
+static int ll_temac_ethtools_set_coalesce(struct net_device *ndev,
+					  struct ethtool_coalesce *ec)
+{
+	struct temac_local *lp = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		netdev_err(ndev,
+			   "Please stop netif before applying configuration\n");
+		return -EFAULT;
+	}
+
+	if (ec->rx_max_coalesced_frames)
+		lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
+	if (ec->tx_max_coalesced_frames)
+		lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
+	/* With typical LocalLink clock speed of 200 MHz and
+	 * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
+	 */
+	if (ec->rx_coalesce_usecs)
+		lp->coalesce_delay_rx =
+			min(255U, (ec->rx_coalesce_usecs * 100) / 512);
+	if (ec->tx_coalesce_usecs)
+		lp->coalesce_delay_tx =
+			min(255U, (ec->tx_coalesce_usecs * 100) / 512);
+
+	return 0;
+}
+
 static const struct ethtool_ops temac_ethtool_ops = {
+	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+				     ETHTOOL_COALESCE_MAX_FRAMES,
 	.nway_reset = phy_ethtool_nway_reset,
 	.get_link = ethtool_op_get_link,
 	.get_ts_info = ethtool_op_get_ts_info,
 	.get_link_ksettings = phy_ethtool_get_link_ksettings,
 	.set_link_ksettings = phy_ethtool_set_link_ksettings,
+	.get_ringparam = ll_temac_ethtools_get_ringparam,
+	.set_ringparam = ll_temac_ethtools_set_ringparam,
+	.get_coalesce = ll_temac_ethtools_get_coalesce,
+	.set_coalesce = ll_temac_ethtools_set_coalesce,
 };
 
 static int temac_probe(struct platform_device *pdev)
@@ -1172,7 +1386,10 @@ static int temac_probe(struct platform_device *pdev)
 	lp->ndev = ndev;
 	lp->dev = &pdev->dev;
 	lp->options = XTE_OPTION_DEFAULTS;
+	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
+	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
 	spin_lock_init(&lp->rx_lock);
+	INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
 
 	/* Setup mutex for synchronization of indirect register access */
 	if (pdata) {
@@ -1235,6 +1452,14 @@ static int temac_probe(struct platform_device *pdev)
 	/* Can checksum TCP/UDP over IPv4. */
 	ndev->features |= NETIF_F_IP_CSUM;
 
+	/* Defaults for IRQ delay/coalescing setup. These are
+	 * configuration values, so does not belong in device-tree.
+	 */
+	lp->coalesce_delay_tx = 0x10;
+	lp->coalesce_count_tx = 0x22;
+	lp->coalesce_delay_rx = 0xff;
+	lp->coalesce_count_rx = 0x07;
+
 	/* Setup LocalLink DMA */
 	if (temac_np) {
 		/* Find the DMA node, map the DMA registers, and
@@ -1273,13 +1498,6 @@ static int temac_probe(struct platform_device *pdev)
 		lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
 		lp->tx_irq = irq_of_parse_and_map(dma_np, 1);
 
-		/* Use defaults for IRQ delay/coalescing setup. These
-		 * are configuration values, so does not belong in
-		 * device-tree.
-		 */
-		lp->tx_chnl_ctrl = 0x10220000;
-		lp->rx_chnl_ctrl = 0xff070000;
-
 		/* Finished with the DMA node; drop the reference */
 		of_node_put(dma_np);
 	} else if (pdata) {
@@ -1305,16 +1523,14 @@ static int temac_probe(struct platform_device *pdev)
 		lp->tx_irq = platform_get_irq(pdev, 1);
 
 		/* IRQ delay/coalescing setup */
-		if (pdata->tx_irq_timeout || pdata->tx_irq_count)
-			lp->tx_chnl_ctrl = (pdata->tx_irq_timeout << 24) |
-				(pdata->tx_irq_count << 16);
-		else
-			lp->tx_chnl_ctrl = 0x10220000;
-		if (pdata->rx_irq_timeout || pdata->rx_irq_count)
-			lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
-				(pdata->rx_irq_count << 16);
-		else
-			lp->rx_chnl_ctrl = 0xff070000;
+		if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
+			lp->coalesce_delay_tx = pdata->tx_irq_timeout;
+			lp->coalesce_count_tx = pdata->tx_irq_count;
+		}
+		if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
+			lp->coalesce_delay_rx = pdata->rx_irq_timeout;
+			lp->coalesce_count_rx = pdata->rx_irq_count;
+		}
 	}
 
 	/* Error handle returned DMA RX and TX interrupts */
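The new ethtool coalesce handlers in the hunk above convert between microseconds and the 8-bit delay-count field of TX_CHNL_CTRL/RX_CHNL_CTRL using the factor noted in the patch comment: with a 200 MHz LocalLink clock and C_PRESCALAR=1023, one delay count is roughly 5.12 us, so usecs = count * 512 / 100 and count = usecs * 100 / 512, clamped to 255. The standalone sketch below is not part of the patch; the helper names (delay_to_usecs, usecs_to_delay) are illustrative only and simply mirror that arithmetic.

/* Standalone sketch (not from the patch): usec <-> delay-count
 * conversion as used by ll_temac_ethtools_get/set_coalesce above.
 */
#include <stdio.h>

static unsigned int delay_to_usecs(unsigned int count)
{
	/* one count ~= 5.12 us */
	return (count * 512) / 100;
}

static unsigned int usecs_to_delay(unsigned int usecs)
{
	unsigned int count = (usecs * 100) / 512;

	/* the hardware field is 8 bits wide */
	return count > 255 ? 255 : count;
}

int main(void)
{
	/* Driver default RX delay of 0xff -> ~1305 us */
	printf("delay 0xff -> %u us\n", delay_to_usecs(0xff));
	/* Requesting 100 us rounds down to count 19 -> ~97 us effective */
	printf("100 us -> count %u -> %u us\n",
	       usecs_to_delay(100), delay_to_usecs(usecs_to_delay(100)));
	return 0;
}

So the default coalesce_delay_rx of 0xff reported by get_coalesce corresponds to about 1305 us, and a requested 100 us is rounded down to a count of 19, i.e. roughly 97 us.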
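ll_temac_recv() above schedules restart_work whenever the number of descriptors still owned by the DMA engine drops below coalesce_count_rx, since the hardware raises no further interrupts once the descriptor at RX_TAILDESC_PTR has been consumed. The count it uses is the inclusive ring distance from rx_bd_ci to rx_bd_tail. The sketch below is a standalone illustration of that wrap-around arithmetic only; it omits the driver's additional check that an skb is still attached at rx_bd_ci, and the name buffers_available is illustrative.

/* Standalone sketch (not from the patch): the ring-distance arithmetic
 * behind ll_temac_recv_buffers_available().
 */
#include <stdio.h>

static int buffers_available(int rx_bd_ci, int rx_bd_tail, int rx_bd_num)
{
	/* inclusive distance from the next descriptor to process (ci)
	 * to the last descriptor handed to the DMA engine (tail)
	 */
	int available = 1 + rx_bd_tail - rx_bd_ci;

	if (available <= 0)
		available += rx_bd_num;
	return available;
}

int main(void)
{
	/* no wrap: ci=2, tail=9 in a 16-entry ring -> 8 descriptors */
	printf("%d\n", buffers_available(2, 9, 16));
	/* wrapped: ci=14, tail=3 in a 16-entry ring -> 6 descriptors */
	printf("%d\n", buffers_available(14, 3, 16));
	return 0;
}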
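The paired smp_mb() calls added to temac_start_xmit() and temac_start_xmit_done() implement the usual stop-queue/wake-queue handshake: the transmit path stops the queue, issues a full barrier, then re-checks for ring space that a concurrent completion may just have freed, while the completion path frees space, issues the matching barrier, and wakes the queue. A rough userspace model of that ordering, using C11 atomics in place of the kernel primitives and illustrative names (ring_space, queue_stopped, try_transmit, transmit_done), is sketched below; it is not the driver code itself.

/* Standalone sketch (not from the patch): stop/re-check/wake pattern
 * modelled with C11 atomics instead of netif_*_queue() and smp_mb().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int ring_space;      /* free TX descriptors */
static atomic_bool queue_stopped;  /* analogue of netif_queue_stopped() */

/* Transmit path: analogue of the check in temac_start_xmit() */
static bool try_transmit(int needed)
{
	if (atomic_load(&ring_space) < needed) {
		if (atomic_load(&queue_stopped))
			return false;              /* NETDEV_TX_BUSY */

		atomic_store(&queue_stopped, true);

		/* Matches the barrier in the completion path */
		atomic_thread_fence(memory_order_seq_cst);

		/* Space might have just been freed - check again */
		if (atomic_load(&ring_space) < needed)
			return false;              /* NETDEV_TX_BUSY */

		atomic_store(&queue_stopped, false);  /* wake */
	}
	atomic_fetch_sub(&ring_space, needed);
	return true;
}

/* Completion path: analogue of temac_start_xmit_done() */
static void transmit_done(int freed)
{
	atomic_fetch_add(&ring_space, freed);

	/* Matches the barrier in the transmit path */
	atomic_thread_fence(memory_order_seq_cst);

	atomic_store(&queue_stopped, false);  /* netif_wake_queue() */
}

int main(void)
{
	atomic_store(&ring_space, 1);
	printf("tx of 2 frags: %s\n", try_transmit(2) ? "ok" : "busy");
	transmit_done(4);
	printf("tx of 2 frags: %s\n", try_transmit(2) ? "ok" : "busy");
	return 0;
}

Without the barrier and re-check, the transmit path could stop the queue just after the completion path had already freed space and issued its (now lost) wake, leaving the queue stopped with no further completions to restart it; that is the TX hang the patch closes.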