Diffstat (limited to 'drivers/net/ethernet/cadence/macb.c')
-rw-r--r-- | drivers/net/ethernet/cadence/macb.c | 329
1 file changed, 254 insertions, 75 deletions
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ec09fcece711..538544a7c642 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -32,19 +32,28 @@
 #include <linux/of_gpio.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
-
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
 #include "macb.h"
 
 #define MACB_RX_BUFFER_SIZE	128
 #define RX_BUFFER_MULTIPLE	64	/* bytes */
-#define RX_RING_SIZE		512 /* must be power of 2 */
-#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
-#define TX_RING_SIZE		128 /* must be power of 2 */
-#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
+
+#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
+#define MIN_RX_RING_SIZE	64
+#define MAX_RX_RING_SIZE	8192
+#define RX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc) \
+				 * (bp)->rx_ring_size)
+
+#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
+#define MIN_TX_RING_SIZE	64
+#define MAX_TX_RING_SIZE	4096
+#define TX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc) \
+				 * (bp)->tx_ring_size)
 
 /* level of occupied TX descriptors under which we wake up TX process */
-#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
+#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
 
 #define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
 				 | MACB_BIT(ISR_ROVR))
@@ -53,10 +62,13 @@
 				 | MACB_BIT(TXERR))
 #define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 
-#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
-#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
+/* Max length of transmit frame must be a multiple of 8 bytes */
+#define MACB_TX_LEN_ALIGN	8
+#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
+#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
 
-#define GEM_MTU_MIN_SIZE	68
+#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
+#define MACB_NETIF_LSO		(NETIF_F_TSO | NETIF_F_UFO)
 
 #define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 #define MACB_WOL_ENABLED		(0x1 << 1)
@@ -67,45 +79,47 @@
 #define MACB_HALT_TIMEOUT	1230
 
 /* Ring buffer accessors */
-static unsigned int macb_tx_ring_wrap(unsigned int index)
+static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 {
-	return index & (TX_RING_SIZE - 1);
+	return index & (bp->tx_ring_size - 1);
 }
 
 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
 					  unsigned int index)
 {
-	return &queue->tx_ring[macb_tx_ring_wrap(index)];
+	return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
 }
 
 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
 				       unsigned int index)
 {
-	return &queue->tx_skb[macb_tx_ring_wrap(index)];
+	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
 }
 
 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
 {
 	dma_addr_t offset;
 
-	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
+	offset = macb_tx_ring_wrap(queue->bp, index) *
+		 sizeof(struct macb_dma_desc);
 
 	return queue->tx_ring_dma + offset;
 }
 
-static unsigned int macb_rx_ring_wrap(unsigned int index)
+static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
 {
-	return index & (RX_RING_SIZE - 1);
+	return index & (bp->rx_ring_size - 1);
}
 
 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
 {
-	return &bp->rx_ring[macb_rx_ring_wrap(index)];
+	return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
 }
 
 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
 {
-	return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
+	return bp->rx_buffers + bp->rx_buffer_size *
+	       macb_rx_ring_wrap(bp, index);
 }
 
 /* I/O accessors */
@@ -608,7 +622,8 @@ static void macb_tx_error_task(struct work_struct *work)
 			 */
 			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
 				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
-					    macb_tx_ring_wrap(tail), skb->data);
+					    macb_tx_ring_wrap(bp, tail),
+					    skb->data);
 				bp->stats.tx_packets++;
 				bp->stats.tx_bytes += skb->len;
 			}
@@ -700,7 +715,8 @@ static void macb_tx_interrupt(struct macb_queue *queue)
 			/* First, update TX stats if needed */
 			if (skb) {
 				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
-					    macb_tx_ring_wrap(tail), skb->data);
+					    macb_tx_ring_wrap(bp, tail),
+					    skb->data);
 				bp->stats.tx_packets++;
 				bp->stats.tx_bytes += skb->len;
 			}
@@ -720,7 +736,7 @@ static void macb_tx_interrupt(struct macb_queue *queue)
 	queue->tx_tail = tail;
 	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
 	    CIRC_CNT(queue->tx_head, queue->tx_tail,
-		     TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
+		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
 		netif_wake_subqueue(bp->dev, queue_index);
 }
 
@@ -731,8 +747,8 @@ static void gem_rx_refill(struct macb *bp)
 	dma_addr_t paddr;
 
 	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
-			  RX_RING_SIZE) > 0) {
-		entry = macb_rx_ring_wrap(bp->rx_prepared_head);
+			  bp->rx_ring_size) > 0) {
+		entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
@@ -759,7 +775,7 @@ static void gem_rx_refill(struct macb *bp)
 
 			bp->rx_skbuff[entry] = skb;
 
-			if (entry == RX_RING_SIZE - 1)
+			if (entry == bp->rx_ring_size - 1)
 				paddr |= MACB_BIT(RX_WRAP);
 			macb_set_addr(&(bp->rx_ring[entry]), paddr);
 			bp->rx_ring[entry].ctrl = 0;
@@ -813,7 +829,7 @@ static int gem_rx(struct macb *bp, int budget)
 		dma_addr_t addr;
 		bool rxused;
 
-		entry = macb_rx_ring_wrap(bp->rx_tail);
+		entry = macb_rx_ring_wrap(bp, bp->rx_tail);
 		desc = &bp->rx_ring[entry];
 
 		/* Make hw descriptor updates visible to CPU */
@@ -895,8 +911,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 	len = desc->ctrl & bp->rx_frm_len_mask;
 
 	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
-		    macb_rx_ring_wrap(first_frag),
-		    macb_rx_ring_wrap(last_frag), len);
+		    macb_rx_ring_wrap(bp, first_frag),
+		    macb_rx_ring_wrap(bp, last_frag), len);
 
 	/* The ethernet header starts NET_IP_ALIGN bytes into the
 	 * first buffer. Since the header is 14 bytes, this makes the
@@ -969,12 +985,12 @@ static inline void macb_init_rx_ring(struct macb *bp)
 	int i;
 
 	addr = bp->rx_buffers_dma;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < bp->rx_ring_size; i++) {
 		bp->rx_ring[i].addr = addr;
 		bp->rx_ring[i].ctrl = 0;
 		addr += bp->rx_buffer_size;
 	}
-	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+	bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
 	bp->rx_tail = 0;
 }
 
@@ -1214,7 +1230,8 @@ static void macb_poll_controller(struct net_device *dev)
 
 static unsigned int macb_tx_map(struct macb *bp,
 				struct macb_queue *queue,
-				struct sk_buff *skb)
+				struct sk_buff *skb,
+				unsigned int hdrlen)
 {
 	dma_addr_t mapping;
 	unsigned int len, entry, i, tx_head = queue->tx_head;
@@ -1222,15 +1239,28 @@ static unsigned int macb_tx_map(struct macb *bp,
 	struct macb_dma_desc *desc;
 	unsigned int offset, size, count = 0;
 	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
-	unsigned int eof = 1;
-	u32 ctrl;
+	unsigned int eof = 1, mss_mfs = 0;
+	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
+
+	/* LSO */
+	if (skb_shinfo(skb)->gso_size != 0) {
+		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+			/* UDP - UFO */
+			lso_ctrl = MACB_LSO_UFO_ENABLE;
+		else
+			/* TCP - TSO */
+			lso_ctrl = MACB_LSO_TSO_ENABLE;
+	}
 
 	/* First, map non-paged data */
 	len = skb_headlen(skb);
+
+	/* first buffer length */
+	size = hdrlen;
+
 	offset = 0;
 	while (len) {
-		size = min(len, bp->max_tx_length);
-		entry = macb_tx_ring_wrap(tx_head);
+		entry = macb_tx_ring_wrap(bp, tx_head);
 		tx_skb = &queue->tx_skb[entry];
 
 		mapping = dma_map_single(&bp->pdev->dev,
@@ -1249,6 +1279,8 @@ static unsigned int macb_tx_map(struct macb *bp,
 		offset += size;
 		count++;
 		tx_head++;
+
+		size = min(len, bp->max_tx_length);
 	}
 
 	/* Then, map paged data from fragments */
@@ -1259,7 +1291,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 		offset = 0;
 		while (len) {
 			size = min(len, bp->max_tx_length);
-			entry = macb_tx_ring_wrap(tx_head);
+			entry = macb_tx_ring_wrap(bp, tx_head);
 			tx_skb = &queue->tx_skb[entry];
 
 			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
@@ -1297,14 +1329,29 @@ static unsigned int macb_tx_map(struct macb *bp,
 	 * to set the end of TX queue
 	 */
 	i = tx_head;
-	entry = macb_tx_ring_wrap(i);
+	entry = macb_tx_ring_wrap(bp, i);
 	ctrl = MACB_BIT(TX_USED);
 	desc = &queue->tx_ring[entry];
 	desc->ctrl = ctrl;
 
+	if (lso_ctrl) {
+		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
+			/* include header and FCS in value given to h/w */
+			mss_mfs = skb_shinfo(skb)->gso_size +
+				  skb_transport_offset(skb) +
+				  ETH_FCS_LEN;
+		else /* TSO */ {
+			mss_mfs = skb_shinfo(skb)->gso_size;
+			/* TCP Sequence Number Source Select
+			 * can be set only for TSO
+			 */
+			seq_ctrl = 0;
+		}
+	}
+
 	do {
 		i--;
-		entry = macb_tx_ring_wrap(i);
+		entry = macb_tx_ring_wrap(bp, i);
 		tx_skb = &queue->tx_skb[entry];
 		desc = &queue->tx_ring[entry];
 
@@ -1313,9 +1360,19 @@ static unsigned int macb_tx_map(struct macb *bp,
 			ctrl |= MACB_BIT(TX_LAST);
 			eof = 0;
 		}
-		if (unlikely(entry == (TX_RING_SIZE - 1)))
+		if (unlikely(entry == (bp->tx_ring_size - 1)))
 			ctrl |= MACB_BIT(TX_WRAP);
 
+		/* First descriptor is header descriptor */
+		if (i == queue->tx_head) {
+			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
+			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
+		} else
+			/* Only set MSS/MFS on payload descriptors
+			 * (second or later descriptor)
+			 */
+			ctrl |= MACB_BF(MSS_MFS, mss_mfs);
+
 		/* Set TX buffer descriptor */
 		macb_set_addr(desc, tx_skb->mapping);
 		/* desc->addr must be visible to hardware before clearing
@@ -1341,6 +1398,43 @@ dma_error:
 	return 0;
 }
 
+static netdev_features_t macb_features_check(struct sk_buff *skb,
+					     struct net_device *dev,
+					     netdev_features_t features)
+{
+	unsigned int nr_frags, f;
+	unsigned int hdrlen;
+
+	/* Validate LSO compatibility */
+
+	/* there is only one buffer */
+	if (!skb_is_nonlinear(skb))
+		return features;
+
+	/* length of header */
+	hdrlen = skb_transport_offset(skb);
+	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+		hdrlen += tcp_hdrlen(skb);
+
+	/* For LSO:
+	 * When software supplies two or more payload buffers all payload
+	 * buffers apart from the last must be a multiple of 8 bytes in size.
+	 */
+	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
+		return features & ~MACB_NETIF_LSO;
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	/* No need to check last fragment */
+	nr_frags--;
+	for (f = 0; f < nr_frags; f++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
+			return features & ~MACB_NETIF_LSO;
+	}
+	return features;
+}
+
 static inline int macb_clear_csum(struct sk_buff *skb)
 {
 	/* no change for packets without checksum offloading */
@@ -1365,7 +1459,28 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct macb *bp = netdev_priv(dev);
 	struct macb_queue *queue = &bp->queues[queue_index];
 	unsigned long flags;
-	unsigned int count, nr_frags, frag_size, f;
+	unsigned int desc_cnt, nr_frags, frag_size, f;
+	unsigned int hdrlen;
+	bool is_lso, is_udp = 0;
+
+	is_lso = (skb_shinfo(skb)->gso_size != 0);
+
+	if (is_lso) {
+		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
+
+		/* length of headers */
+		if (is_udp)
+			/* only queue eth + ip headers separately for UDP */
+			hdrlen = skb_transport_offset(skb);
+		else
+			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		if (skb_headlen(skb) < hdrlen) {
+			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
+			/* if this is required, would need to copy to single buffer */
+			return NETDEV_TX_BUSY;
+		}
+	} else
+		hdrlen = min(skb_headlen(skb), bp->max_tx_length);
 
 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
 	netdev_vdbg(bp->dev,
@@ -1380,17 +1495,22 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * socket buffer: skb fragments of jumbo frames may need to be
 	 * split into many buffer descriptors.
 	 */
-	count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
+	if (is_lso && (skb_headlen(skb) > hdrlen))
+		/* extra header descriptor if also payload in first buffer */
+		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
+	else
+		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	for (f = 0; f < nr_frags; f++) {
 		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
-		count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
+		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
 	}
 
 	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
-	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
+	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
+		       bp->tx_ring_size) < desc_cnt) {
 		netif_stop_subqueue(dev, queue_index);
 		spin_unlock_irqrestore(&bp->lock, flags);
 		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
@@ -1404,7 +1524,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* Map socket buffer for DMA transfer */
-	if (!macb_tx_map(bp, queue, skb)) {
+	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
 		dev_kfree_skb_any(skb);
 		goto unlock;
 	}
@@ -1416,7 +1536,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 
-	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
+	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
 		netif_stop_subqueue(dev, queue_index);
 
 unlock:
@@ -1455,7 +1575,7 @@ static void gem_free_rx_buffers(struct macb *bp)
 	if (!bp->rx_skbuff)
 		return;
 
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < bp->rx_ring_size; i++) {
 		skb = bp->rx_skbuff[i];
 
 		if (!skb)
@@ -1480,7 +1600,7 @@ static void macb_free_rx_buffers(struct macb *bp)
 {
 	if (bp->rx_buffers) {
 		dma_free_coherent(&bp->pdev->dev,
-				  RX_RING_SIZE * bp->rx_buffer_size,
+				  bp->rx_ring_size * bp->rx_buffer_size,
 				  bp->rx_buffers, bp->rx_buffers_dma);
 		bp->rx_buffers = NULL;
 	}
@@ -1493,7 +1613,7 @@ static void macb_free_consistent(struct macb *bp)
 
 	bp->macbgem_ops.mog_free_rx_buffers(bp);
 	if (bp->rx_ring) {
-		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
+		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
 				  bp->rx_ring, bp->rx_ring_dma);
 		bp->rx_ring = NULL;
 	}
@@ -1502,7 +1622,7 @@ static void macb_free_consistent(struct macb *bp)
 		kfree(queue->tx_skb);
 		queue->tx_skb = NULL;
 		if (queue->tx_ring) {
-			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
+			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
 					  queue->tx_ring, queue->tx_ring_dma);
 			queue->tx_ring = NULL;
 		}
@@ -1513,14 +1633,14 @@ static int gem_alloc_rx_buffers(struct macb *bp)
 {
 	int size;
 
-	size = RX_RING_SIZE * sizeof(struct sk_buff *);
+	size = bp->rx_ring_size * sizeof(struct sk_buff *);
 	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
 	if (!bp->rx_skbuff)
 		return -ENOMEM;
-
-	netdev_dbg(bp->dev,
-		   "Allocated %d RX struct sk_buff entries at %p\n",
-		   RX_RING_SIZE, bp->rx_skbuff);
+	else
+		netdev_dbg(bp->dev,
+			   "Allocated %d RX struct sk_buff entries at %p\n",
+			   bp->rx_ring_size, bp->rx_skbuff);
 	return 0;
 }
 
@@ -1528,7 +1648,7 @@ static int macb_alloc_rx_buffers(struct macb *bp)
 {
 	int size;
 
-	size = RX_RING_SIZE * bp->rx_buffer_size;
+	size = bp->rx_ring_size * bp->rx_buffer_size;
 	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
 					    &bp->rx_buffers_dma, GFP_KERNEL);
 	if (!bp->rx_buffers)
@@ -1547,7 +1667,7 @@ static int macb_alloc_consistent(struct macb *bp)
 	int size;
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		size = TX_RING_BYTES;
+		size = TX_RING_BYTES(bp);
 		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
 						    &queue->tx_ring_dma,
 						    GFP_KERNEL);
@@ -1558,13 +1678,13 @@ static int macb_alloc_consistent(struct macb *bp)
 			   q, size, (unsigned long)queue->tx_ring_dma,
 			   queue->tx_ring);
 
-		size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
+		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
 		queue->tx_skb = kmalloc(size, GFP_KERNEL);
 		if (!queue->tx_skb)
 			goto out_err;
 	}
 
-	size = RX_RING_BYTES;
+	size = RX_RING_BYTES(bp);
 	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
 					 &bp->rx_ring_dma, GFP_KERNEL);
 	if (!bp->rx_ring)
@@ -1590,11 +1710,11 @@ static void gem_init_rings(struct macb *bp)
 	int i;
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		for (i = 0; i < TX_RING_SIZE; i++) {
-			macb_set_addr(&(queue->tx_ring[i]), 0);
+		for (i = 0; i < bp->tx_ring_size; i++) {
+			queue->tx_ring[i].addr = 0;
 			queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
 		}
-		queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+		queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
 		queue->tx_head = 0;
 		queue->tx_tail = 0;
 	}
@@ -1611,13 +1731,13 @@ static void macb_init_rings(struct macb *bp)
 
 	macb_init_rx_ring(bp);
 
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < bp->tx_ring_size; i++) {
 		bp->queues[0].tx_ring[i].addr = 0;
 		bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
 	}
 	bp->queues[0].tx_head = 0;
 	bp->queues[0].tx_tail = 0;
-	bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+	bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
 }
 
 static void macb_reset_hw(struct macb *bp)
@@ -1986,19 +2106,9 @@ static int macb_close(struct net_device *dev)
 
 static int macb_change_mtu(struct net_device *dev, int new_mtu)
 {
-	struct macb *bp = netdev_priv(dev);
-	u32 max_mtu;
-
 	if (netif_running(dev))
 		return -EBUSY;
 
-	max_mtu = ETH_DATA_LEN;
-	if (bp->caps & MACB_CAPS_JUMBO)
-		max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
-
-	if ((new_mtu > max_mtu) || (new_mtu < GEM_MTU_MIN_SIZE))
-		return -EINVAL;
-
 	dev->mtu = new_mtu;
 
 	return 0;
@@ -2158,8 +2268,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
 			| MACB_GREGS_VERSION;
 
-	tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
-	head = macb_tx_ring_wrap(bp->queues[0].tx_head);
+	tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
+	head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
 
 	regs_buff[0]  = macb_readl(bp, NCR);
 	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
@@ -2214,6 +2324,56 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	return 0;
 }
 
+static void macb_get_ringparam(struct net_device *netdev,
+			       struct ethtool_ringparam *ring)
+{
+	struct macb *bp = netdev_priv(netdev);
+
+	ring->rx_max_pending = MAX_RX_RING_SIZE;
+	ring->tx_max_pending = MAX_TX_RING_SIZE;
+
+	ring->rx_pending = bp->rx_ring_size;
+	ring->tx_pending = bp->tx_ring_size;
+}
+
+static int macb_set_ringparam(struct net_device *netdev,
+			      struct ethtool_ringparam *ring)
+{
+	struct macb *bp = netdev_priv(netdev);
+	u32 new_rx_size, new_tx_size;
+	unsigned int reset = 0;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	new_rx_size = clamp_t(u32, ring->rx_pending,
+			      MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
+	new_rx_size = roundup_pow_of_two(new_rx_size);
+
+	new_tx_size = clamp_t(u32, ring->tx_pending,
+			      MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
+	new_tx_size = roundup_pow_of_two(new_tx_size);
+
+	if ((new_tx_size == bp->tx_ring_size) &&
+	    (new_rx_size == bp->rx_ring_size)) {
+		/* nothing to do */
+		return 0;
+	}
+
+	if (netif_running(bp->dev)) {
+		reset = 1;
+		macb_close(bp->dev);
+	}
+
+	bp->rx_ring_size = new_rx_size;
+	bp->tx_ring_size = new_tx_size;
+
+	if (reset)
+		macb_open(bp->dev);
+
+	return 0;
+}
+
 static const struct ethtool_ops macb_ethtool_ops = {
 	.get_regs_len		= macb_get_regs_len,
 	.get_regs		= macb_get_regs,
@@ -2223,6 +2383,8 @@ static const struct ethtool_ops macb_ethtool_ops = {
 	.set_wol		= macb_set_wol,
 	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
 	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
+	.get_ringparam		= macb_get_ringparam,
+	.set_ringparam		= macb_set_ringparam,
 };
 
 static const struct ethtool_ops gem_ethtool_ops = {
@@ -2235,6 +2397,8 @@ static const struct ethtool_ops gem_ethtool_ops = {
 	.get_sset_count		= gem_get_sset_count,
 	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
 	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
+	.get_ringparam		= macb_get_ringparam,
+	.set_ringparam		= macb_set_ringparam,
 };
 
 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2298,6 +2462,7 @@ static const struct net_device_ops macb_netdev_ops = {
 	.ndo_poll_controller	= macb_poll_controller,
 #endif
 	.ndo_set_features	= macb_set_features,
+	.ndo_features_check	= macb_features_check,
 };
 
 /* Configure peripheral capabilities according to device tree
@@ -2429,6 +2594,9 @@ static int macb_init(struct platform_device *pdev)
 	int err;
 	u32 val;
 
+	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
+	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
+
 	/* set the queue register mapping once for all: queue0 has a special
 	 * register mapping but we don't want to test the queue index then
 	 * compute the corresponding register offset at run time.
@@ -2501,6 +2669,11 @@ static int macb_init(struct platform_device *pdev)
 
 	/* Set features */
 	dev->hw_features = NETIF_F_SG;
+
+	/* Check LSO capability */
+	if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
+		dev->hw_features |= MACB_NETIF_LSO;
+
 	/* Checksum offload is only available on gem with packet buffer */
 	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
 		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
@@ -2800,7 +2973,6 @@ static const struct net_device_ops at91ether_netdev_ops = {
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_do_ioctl		= macb_ioctl,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= at91ether_poll_controller,
 #endif
@@ -3035,6 +3207,13 @@ static int macb_probe(struct platform_device *pdev)
 		goto err_out_free_netdev;
 	}
 
+	/* MTU range: 68 - 1500 or 10240 */
+	dev->min_mtu = GEM_MTU_MIN_SIZE;
+	if (bp->caps & MACB_CAPS_JUMBO)
+		dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
+	else
+		dev->max_mtu = ETH_DATA_LEN;
+
 	mac = of_get_mac_address(np);
 	if (mac)
 		ether_addr_copy(bp->dev->dev_addr, mac);
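
A note on the ring-size changes above: the wrap helpers still assume power-of-two ring sizes, which is why macb_set_ringparam() clamps the requested counts and then rounds them up with roundup_pow_of_two() before applying them. The standalone sketch below is plain userspace C, not driver code; ring_wrap() and round_up_pow_of_two() are illustrative stand-ins for macb_tx_ring_wrap() and the kernel helper of the same name.

#include <assert.h>
#include <stdio.h>

/* Map a free-running index into the ring with a mask instead of '%'.
 * Valid only when ring_size is a power of two.
 */
static unsigned int ring_wrap(unsigned int ring_size, unsigned int index)
{
	return index & (ring_size - 1);
}

/* Mirrors the clamp-then-round-up policy of macb_set_ringparam() */
static unsigned int round_up_pow_of_two(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int size = round_up_pow_of_two(1000);	/* -> 1024 */

	assert(size == 1024);
	assert(ring_wrap(size, 1025) == 1);	/* same as 1025 % 1024 */
	printf("ring size %u, index 1025 wraps to %u\n",
	       size, ring_wrap(size, 1025));
	return 0;
}

In practice this rounding is visible through the new ethtool hooks: asking for 1000 RX entries with ethtool -G leaves the driver with a 1024-entry ring, as reported back by ethtool -g.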
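The descriptor accounting that macb_start_xmit() now performs for LSO is easiest to follow with concrete numbers: the protocol headers are queued in a descriptor of their own, so any payload sharing the first (linear) buffer costs one extra descriptor. A minimal sketch with illustrative sizes; div_round_up() mirrors the kernel's DIV_ROUND_UP() macro and the buffer lengths are made up for the example.

#include <stdio.h>

/* mirrors the kernel's DIV_ROUND_UP() */
static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	unsigned int max_tx_length = 1 << 13;	/* illustrative per-descriptor limit */
	unsigned int headlen = 1514;		/* linear part of the skb */
	unsigned int hdrlen = 54;		/* eth + ip + tcp headers */
	unsigned int desc_cnt;

	/* LSO case: the header gets its own descriptor, so payload in
	 * the first buffer adds one more descriptor to the count.
	 */
	if (headlen > hdrlen)
		desc_cnt = div_round_up(headlen - hdrlen, max_tx_length) + 1;
	else
		desc_cnt = div_round_up(headlen, max_tx_length);

	printf("linear part needs %u descriptors\n", desc_cnt);	/* prints 2 */
	return 0;
}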
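Similarly, macb_features_check() enforces the hardware rule quoted in its comment: when an LSO frame spans several buffers, every payload buffer except the last must be a multiple of 8 bytes (MACB_TX_LEN_ALIGN); otherwise the TSO/UFO bits are masked off for that skb and the stack falls back to software segmentation. A small illustration of the alignment test, where is_aligned() stands in for the kernel's IS_ALIGNED():

#include <stdbool.h>
#include <stdio.h>

#define TX_LEN_ALIGN 8	/* mirrors MACB_TX_LEN_ALIGN */

/* True when x is a multiple of a; a must be a power of two */
static bool is_aligned(unsigned long x, unsigned long a)
{
	return (x & (a - 1)) == 0;
}

int main(void)
{
	/* e.g. the first payload buffer: headlen minus the protocol headers */
	printf("1460-byte buffer ok: %d\n", is_aligned(1460, TX_LEN_ALIGN)); /* 0 */
	printf("1464-byte buffer ok: %d\n", is_aligned(1464, TX_LEN_ALIGN)); /* 1 */
	return 0;
}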