Diffstat (limited to 'drivers/net/ethernet/sun')
-rw-r--r--  drivers/net/ethernet/sun/cassini.c  |   3
-rw-r--r--  drivers/net/ethernet/sun/niu.c      |  10
-rw-r--r--  drivers/net/ethernet/sun/sunbmac.c  |   1
-rw-r--r--  drivers/net/ethernet/sun/sunhme.c   |  63
-rw-r--r--  drivers/net/ethernet/sun/sunqe.c    |   1
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c  | 775
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.h  |  17
7 files changed, 666 insertions(+), 204 deletions(-)
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 02d370e58110..3dc1f68b322d 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -5179,8 +5179,7 @@ static void cas_remove_one(struct pci_dev *pdev)
cp = netdev_priv(dev);
unregister_netdev(dev);
- if (cp->fw_data)
- vfree(cp->fw_data);
+ vfree(cp->fw_data);
mutex_lock(&cp->pm_mutex);
cancel_work_sync(&cp->reset_task);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 904fd1ab5f6e..0c6416213837 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6651,13 +6651,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (skb->len < ETH_ZLEN) {
- unsigned int pad_bytes = ETH_ZLEN - skb->len;
-
- if (skb_pad(skb, pad_bytes))
- goto out;
- skb_put(skb, pad_bytes);
- }
+ if (eth_skb_pad(skb))
+ goto out;
len = sizeof(struct tx_pkt_hdr) + 15;
if (skb_headroom(skb) < len) {
@@ -10185,7 +10180,6 @@ MODULE_DEVICE_TABLE(of, niu_match);
static struct platform_driver niu_of_driver = {
.driver = {
.name = "niu",
- .owner = THIS_MODULE,
.of_match_table = niu_match,
},
.probe = niu_of_probe,
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 206c1063815a..aa4f9d2d8fa9 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1273,7 +1273,6 @@ MODULE_DEVICE_TABLE(of, bigmac_sbus_match);
static struct platform_driver bigmac_sbus_driver = {
.driver = {
.name = "sunbmac",
- .owner = THIS_MODULE,
.of_match_table = bigmac_sbus_match,
},
.probe = bigmac_sbus_probe,
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 72c8525d5457..7a8ca2c7b7df 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp)
HMD(("init rxring, "));
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb;
+ u32 mapping;
skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
if (!skb) {
@@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp)
/* Because we reserve afterwards. */
skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+ mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(hp->dma_dev, mapping)) {
+ dev_kfree_skb_any(skb);
+ hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
+ continue;
+ }
hme_write_rxd(hp, &hb->happy_meal_rxd[i],
(RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
- dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
- DMA_FROM_DEVICE));
+ mapping);
skb_reserve(skb, RX_OFFSET);
}
@@ -2020,6 +2027,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb = hp->rx_skbs[elem];
if (len > RX_COPY_THRESHOLD) {
struct sk_buff *new_skb;
+ u32 mapping;
/* Now refill the entry, if we can. */
new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
@@ -2027,13 +2035,21 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
drops++;
goto drop_it;
}
+ skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+ mapping = dma_map_single(hp->dma_dev, new_skb->data,
+ RX_BUF_ALLOC_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+ dev_kfree_skb_any(new_skb);
+ drops++;
+ goto drop_it;
+ }
+
dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
hp->rx_skbs[elem] = new_skb;
- skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
- dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE,
- DMA_FROM_DEVICE));
+ mapping);
skb_reserve(new_skb, RX_OFFSET);
/* Trim the original skb for the netif. */
@@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
+static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
+ u32 first_len, u32 first_entry, u32 entry)
+{
+ struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
+
+ dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
+
+ first_entry = NEXT_TX(first_entry);
+ while (first_entry != entry) {
+ struct happy_meal_txd *this = &txbase[first_entry];
+ u32 addr, len;
+
+ addr = hme_read_desc32(hp, &this->tx_addr);
+ len = hme_read_desc32(hp, &this->tx_flags);
+ len &= TXFLAG_SIZE;
+ dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
+ }
+}
+
static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
len = skb->len;
mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
+ goto out_dma_error;
tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
(tx_flags | (len & TXFLAG_SIZE)),
@@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
first_len = skb_headlen(skb);
first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
+ goto out_dma_error;
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
len = skb_frag_size(this_frag);
mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
0, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+ unmap_partial_tx_skb(hp, first_mapping, first_len,
+ first_entry, entry);
+ goto out_dma_error;
+ }
this_txflags = tx_flags;
if (frag == skb_shinfo(skb)->nr_frags - 1)
this_txflags |= TXFLAG_EOP;
@@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
return NETDEV_TX_OK;
+
+out_dma_error:
+ hp->tx_skbs[hp->tx_new] = NULL;
+ spin_unlock_irq(&hp->happy_lock);
+
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
}
static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
@@ -3271,7 +3323,6 @@ MODULE_DEVICE_TABLE(of, hme_sbus_match);
static struct platform_driver hme_sbus_driver = {
.driver = {
.name = "hme",
- .owner = THIS_MODULE,
.of_match_table = hme_sbus_match,
},
.probe = hme_sbus_probe,
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 5695ae2411de..9b825780b3be 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -963,7 +963,6 @@ MODULE_DEVICE_TABLE(of, qec_sbus_match);
static struct platform_driver qec_sbus_driver = {
.driver = {
.name = "qec",
- .owner = THIS_MODULE,
.of_match_table = qec_sbus_match,
},
.probe = qec_sbus_probe,
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 3652afd3ec78..45c408ef67d0 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -15,12 +15,14 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
+#include <linux/highmem.h>
#include <linux/if_vlan.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#endif
+#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
@@ -40,6 +42,8 @@ MODULE_DESCRIPTION("Sun LDOM virtual network driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
+#define VNET_MAX_TXQS 16
+
/* Heuristic for the number of times to exponentially backoff and
* retry sending an LDC trigger when EAGAIN is encountered
*/
@@ -49,6 +53,8 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
/* Ordered from largest major to lowest */
static struct vio_version vnet_versions[] = {
+ { .major = 1, .minor = 8 },
+ { .major = 1, .minor = 7 },
{ .major = 1, .minor = 6 },
{ .major = 1, .minor = 0 },
};
@@ -71,13 +77,19 @@ static int vnet_handle_unknown(struct vnet_port *port, void *arg)
return -ECONNRESET;
}
+static int vnet_port_alloc_tx_ring(struct vnet_port *port);
+
static int vnet_send_attr(struct vio_driver_state *vio)
{
struct vnet_port *port = to_vnet_port(vio);
struct net_device *dev = port->vp->dev;
struct vio_net_attr_info pkt;
int framelen = ETH_FRAME_LEN;
- int i;
+ int i, err;
+
+ err = vnet_port_alloc_tx_ring(to_vnet_port(vio));
+ if (err)
+ return err;
memset(&pkt, 0, sizeof(pkt));
pkt.tag.type = VIO_TYPE_CTRL;
@@ -108,8 +120,15 @@ static int vnet_send_attr(struct vio_driver_state *vio)
pkt.mtu = framelen + VLAN_HLEN;
}
- pkt.plnk_updt = PHYSLINK_UPDATE_NONE;
pkt.cflags = 0;
+ if (vio_version_after_eq(vio, 1, 7) && port->tso) {
+ pkt.cflags |= VNET_LSO_IPV4_CAPAB;
+ if (!port->tsolen)
+ port->tsolen = VNET_MAXTSO;
+ pkt.ipv4_lso_maxlen = port->tsolen;
+ }
+
+ pkt.plnk_updt = PHYSLINK_UPDATE_NONE;
viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
"ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
@@ -163,6 +182,26 @@ static int handle_attr_info(struct vio_driver_state *vio,
}
port->rmtu = localmtu;
+ /* LSO negotiation */
+ if (vio_version_after_eq(vio, 1, 7))
+ port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
+ else
+ port->tso = false;
+ if (port->tso) {
+ if (!port->tsolen)
+ port->tsolen = VNET_MAXTSO;
+ port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
+ if (port->tsolen < VNET_MINTSO) {
+ port->tso = false;
+ port->tsolen = 0;
+ pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
+ }
+ pkt->ipv4_lso_maxlen = port->tsolen;
+ } else {
+ pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
+ pkt->ipv4_lso_maxlen = 0;
+ }
+
/* for version >= 1.6, ACK packet mode we support */
if (vio_version_after_eq(vio, 1, 6)) {
pkt->xfer_mode = VIO_NEW_DRING_MODE;
@@ -274,10 +313,42 @@ static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
return skb;
}
-static int vnet_rx_one(struct vnet_port *port, unsigned int len,
- struct ldc_trans_cookie *cookies, int ncookies)
+static inline void vnet_fullcsum(struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+ int offset = skb_transport_offset(skb);
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
+ if (iph->protocol != IPPROTO_TCP &&
+ iph->protocol != IPPROTO_UDP)
+ return;
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_level = 1;
+ skb->csum = 0;
+ if (iph->protocol == IPPROTO_TCP) {
+ struct tcphdr *ptcp = tcp_hdr(skb);
+
+ ptcp->check = 0;
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - offset, IPPROTO_TCP,
+ skb->csum);
+ } else if (iph->protocol == IPPROTO_UDP) {
+ struct udphdr *pudp = udp_hdr(skb);
+
+ pudp->check = 0;
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - offset, IPPROTO_UDP,
+ skb->csum);
+ }
+}
+
+static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
{
struct net_device *dev = port->vp->dev;
+ unsigned int len = desc->size;
unsigned int copy_len;
struct sk_buff *skb;
int err;
@@ -299,7 +370,7 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len,
skb_put(skb, copy_len);
err = ldc_copy(port->vio.lp, LDC_COPY_IN,
skb->data, copy_len, 0,
- cookies, ncookies);
+ desc->cookies, desc->ncookies);
if (unlikely(err < 0)) {
dev->stats.rx_frame_errors++;
goto out_free_skb;
@@ -309,11 +380,33 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len,
skb_trim(skb, len);
skb->protocol = eth_type_trans(skb, dev);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
+ if (vio_version_after_eq(&port->vio, 1, 8)) {
+ struct vio_net_dext *dext = vio_net_ext(desc);
- netif_rx(skb);
+ if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
+ if (skb->protocol == ETH_P_IP) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+ iph->check = 0;
+ ip_send_check(iph);
+ }
+ }
+ if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
+ skb->ip_summed == CHECKSUM_NONE)
+ vnet_fullcsum(skb);
+ if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_level = 0;
+ if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK)
+ skb->csum_level = 1;
+ }
+ }
+
+ skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ napi_gro_receive(&port->napi, skb);
return 0;
out_free_skb:
@@ -373,23 +466,6 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
return err;
}
-static u32 next_idx(u32 idx, struct vio_dring_state *dr)
-{
- if (++idx == dr->num_entries)
- idx = 0;
- return idx;
-}
-
-static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
-{
- if (idx == 0)
- idx = dr->num_entries - 1;
- else
- idx--;
-
- return idx;
-}
-
static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
struct vio_dring_state *dr,
u32 index)
@@ -430,6 +506,7 @@ static int vnet_walk_rx_one(struct vnet_port *port,
struct vio_driver_state *vio = &port->vio;
int err;
+ BUG_ON(desc == NULL);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -444,7 +521,7 @@ static int vnet_walk_rx_one(struct vnet_port *port,
desc->cookies[0].cookie_addr,
desc->cookies[0].cookie_size);
- err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
+ err = vnet_rx_one(port, desc);
if (err == -ECONNRESET)
return err;
desc->hdr.state = VIO_DESC_DONE;
@@ -456,12 +533,14 @@ static int vnet_walk_rx_one(struct vnet_port *port,
}
static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
- u32 start, u32 end)
+ u32 start, u32 end, int *npkts, int budget)
{
struct vio_driver_state *vio = &port->vio;
int ack_start = -1, ack_end = -1;
+ bool send_ack = true;
- end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);
+ end = (end == (u32) -1) ? vio_dring_prev(dr, start)
+ : vio_dring_next(dr, end);
viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);
@@ -471,10 +550,11 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
return err;
if (err != 0)
break;
+ (*npkts)++;
if (ack_start == -1)
ack_start = start;
ack_end = start;
- start = next_idx(start, dr);
+ start = vio_dring_next(dr, start);
if (ack && start != end) {
err = vnet_send_ack(port, dr, ack_start, ack_end,
VIO_DRING_ACTIVE);
@@ -482,13 +562,26 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
return err;
ack_start = -1;
}
+ if ((*npkts) >= budget) {
+ send_ack = false;
+ break;
+ }
}
if (unlikely(ack_start == -1))
- ack_start = ack_end = prev_idx(start, dr);
- return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
+ ack_start = ack_end = vio_dring_prev(dr, start);
+ if (send_ack) {
+ port->napi_resume = false;
+ return vnet_send_ack(port, dr, ack_start, ack_end,
+ VIO_DRING_STOPPED);
+ } else {
+ port->napi_resume = true;
+ port->napi_stop_idx = ack_end;
+ return 1;
+ }
}
-static int vnet_rx(struct vnet_port *port, void *msgbuf)
+static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
+ int budget)
{
struct vio_dring_data *pkt = msgbuf;
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
@@ -505,11 +598,13 @@ static int vnet_rx(struct vnet_port *port, void *msgbuf)
return 0;
}
- dr->rcv_nxt++;
+ if (!port->napi_resume)
+ dr->rcv_nxt++;
/* XXX Validate pkt->start_idx and pkt->end_idx XXX */
- return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
+ return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
+ npkts, budget);
}
static int idx_is_pending(struct vio_dring_state *dr, u32 end)
@@ -522,7 +617,7 @@ static int idx_is_pending(struct vio_dring_state *dr, u32 end)
found = 1;
break;
}
- idx = next_idx(idx, dr);
+ idx = vio_dring_next(dr, idx);
}
return found;
}
@@ -535,19 +630,26 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
struct vnet *vp;
u32 end;
struct vio_net_desc *desc;
+ struct netdev_queue *txq;
+
if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
return 0;
end = pkt->end_idx;
- if (unlikely(!idx_is_pending(dr, end)))
+ vp = port->vp;
+ dev = vp->dev;
+ netif_tx_lock(dev);
+ if (unlikely(!idx_is_pending(dr, end))) {
+ netif_tx_unlock(dev);
return 0;
+ }
/* sync for race conditions with vnet_start_xmit() and tell xmit it
* is time to send a trigger.
*/
- dr->cons = next_idx(end, dr);
+ dr->cons = vio_dring_next(dr, end);
desc = vio_dring_entry(dr, dr->cons);
- if (desc->hdr.state == VIO_DESC_READY && port->start_cons) {
+ if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
/* vnet_start_xmit() just populated this dring but missed
* sending the "start" LDC message to the consumer.
* Send a "start" trigger on its behalf.
@@ -559,11 +661,10 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
} else {
port->start_cons = true;
}
+ netif_tx_unlock(dev);
-
- vp = port->vp;
- dev = vp->dev;
- if (unlikely(netif_queue_stopped(dev) &&
+ txq = netdev_get_tx_queue(dev, port->q_index);
+ if (unlikely(netif_tx_queue_stopped(txq) &&
vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
return 1;
@@ -591,58 +692,64 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf)
return 0;
}
-static void maybe_tx_wakeup(unsigned long param)
+/* Got back a STOPPED LDC message on port. If the queue is stopped,
+ * wake it up so that we'll send out another START message at the
+ * next TX.
+ */
+static void maybe_tx_wakeup(struct vnet_port *port)
{
- struct vnet *vp = (struct vnet *)param;
- struct net_device *dev = vp->dev;
-
- netif_tx_lock(dev);
- if (likely(netif_queue_stopped(dev))) {
- struct vnet_port *port;
- int wake = 1;
+ struct netdev_queue *txq;
- list_for_each_entry(port, &vp->port_list, list) {
- struct vio_dring_state *dr;
+ txq = netdev_get_tx_queue(port->vp->dev, port->q_index);
+ __netif_tx_lock(txq, smp_processor_id());
+ if (likely(netif_tx_queue_stopped(txq))) {
+ struct vio_dring_state *dr;
- dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- if (vnet_tx_dring_avail(dr) <
- VNET_TX_WAKEUP_THRESH(dr)) {
- wake = 0;
- break;
- }
- }
- if (wake)
- netif_wake_queue(dev);
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ netif_tx_wake_queue(txq);
}
- netif_tx_unlock(dev);
+ __netif_tx_unlock(txq);
}
-static void vnet_event(void *arg, int event)
+static inline bool port_is_up(struct vnet_port *vnet)
+{
+ struct vio_driver_state *vio = &vnet->vio;
+
+ return !!(vio->hs_state & VIO_HS_COMPLETE);
+}
+
+static int vnet_event_napi(struct vnet_port *port, int budget)
{
- struct vnet_port *port = arg;
struct vio_driver_state *vio = &port->vio;
- unsigned long flags;
int tx_wakeup, err;
+ int npkts = 0;
+ int event = (port->rx_event & LDC_EVENT_RESET);
- spin_lock_irqsave(&vio->lock, flags);
-
+ldc_ctrl:
if (unlikely(event == LDC_EVENT_RESET ||
event == LDC_EVENT_UP)) {
vio_link_state_change(vio, event);
- spin_unlock_irqrestore(&vio->lock, flags);
if (event == LDC_EVENT_RESET) {
port->rmtu = 0;
+ port->tso = true;
+ port->tsolen = 0;
vio_port_up(vio);
}
- return;
+ port->rx_event = 0;
+ return 0;
}
+ /* We may have multiple LDC events in rx_event. Unroll send_events() */
+ event = (port->rx_event & LDC_EVENT_UP);
+ port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP);
+ if (event == LDC_EVENT_UP)
+ goto ldc_ctrl;
+ event = port->rx_event;
+ if (!(event & LDC_EVENT_DATA_READY))
+ return 0;
- if (unlikely(event != LDC_EVENT_DATA_READY)) {
- pr_warn("Unexpected LDC event %d\n", event);
- spin_unlock_irqrestore(&vio->lock, flags);
- return;
- }
+ /* we dont expect any other bits than RESET, UP, DATA_READY */
+ BUG_ON(event != LDC_EVENT_DATA_READY);
tx_wakeup = err = 0;
while (1) {
@@ -651,6 +758,20 @@ static void vnet_event(void *arg, int event)
u64 raw[8];
} msgbuf;
+ if (port->napi_resume) {
+ struct vio_dring_data *pkt =
+ (struct vio_dring_data *)&msgbuf;
+ struct vio_dring_state *dr =
+ &port->vio.drings[VIO_DRIVER_RX_RING];
+
+ pkt->tag.type = VIO_TYPE_DATA;
+ pkt->tag.stype = VIO_SUBTYPE_INFO;
+ pkt->tag.stype_env = VIO_DRING_DATA;
+ pkt->seq = dr->rcv_nxt;
+ pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx);
+ pkt->end_idx = -1;
+ goto napi_resume;
+ }
err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
if (unlikely(err < 0)) {
if (err == -ECONNRESET)
@@ -667,10 +788,22 @@ static void vnet_event(void *arg, int event)
err = vio_validate_sid(vio, &msgbuf.tag);
if (err < 0)
break;
-
+napi_resume:
if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
- err = vnet_rx(port, &msgbuf);
+ if (!port_is_up(port)) {
+ /* failures like handshake_failure()
+ * may have cleaned up dring, but
+ * NAPI polling may bring us here.
+ */
+ err = -ECONNRESET;
+ break;
+ }
+ err = vnet_rx(port, &msgbuf, &npkts, budget);
+ if (npkts >= budget)
+ break;
+ if (npkts == 0)
+ break;
} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
err = vnet_ack(port, &msgbuf);
if (err > 0)
@@ -691,15 +824,34 @@ static void vnet_event(void *arg, int event)
if (err == -ECONNRESET)
break;
}
- spin_unlock(&vio->lock);
- /* Kick off a tasklet to wake the queue. We cannot call
- * maybe_tx_wakeup directly here because we could deadlock on
- * netif_tx_lock() with dev_watchdog()
- */
if (unlikely(tx_wakeup && err != -ECONNRESET))
- tasklet_schedule(&port->vp->vnet_tx_wakeup);
+ maybe_tx_wakeup(port);
+ return npkts;
+}
+
+static int vnet_poll(struct napi_struct *napi, int budget)
+{
+ struct vnet_port *port = container_of(napi, struct vnet_port, napi);
+ struct vio_driver_state *vio = &port->vio;
+ int processed = vnet_event_napi(port, budget);
+
+ if (processed < budget) {
+ napi_complete(napi);
+ port->rx_event &= ~LDC_EVENT_DATA_READY;
+ vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
+ }
+ return processed;
+}
+
+static void vnet_event(void *arg, int event)
+{
+ struct vnet_port *port = arg;
+ struct vio_driver_state *vio = &port->vio;
+
+ port->rx_event |= event;
+ vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
+ napi_schedule(&port->napi);
- local_irq_restore(flags);
}
static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
@@ -746,26 +898,19 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
return err;
}
-static inline bool port_is_up(struct vnet_port *vnet)
-{
- struct vio_driver_state *vio = &vnet->vio;
-
- return !!(vio->hs_state & VIO_HS_COMPLETE);
-}
-
struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
unsigned int hash = vnet_hashfn(skb->data);
struct hlist_head *hp = &vp->port_hash[hash];
struct vnet_port *port;
- hlist_for_each_entry(port, hp, hash) {
+ hlist_for_each_entry_rcu(port, hp, hash) {
if (!port_is_up(port))
continue;
if (ether_addr_equal(port->raddr, skb->data))
return port;
}
- list_for_each_entry(port, &vp->port_list, list) {
+ list_for_each_entry_rcu(port, &vp->port_list, list) {
if (!port->switch_port)
continue;
if (!port_is_up(port))
@@ -775,18 +920,6 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
return NULL;
}
-struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
-{
- struct vnet_port *ret;
- unsigned long flags;
-
- spin_lock_irqsave(&vp->lock, flags);
- ret = __tx_port_find(vp, skb);
- spin_unlock_irqrestore(&vp->lock, flags);
-
- return ret;
-}
-
static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
unsigned *pending)
{
@@ -847,11 +980,10 @@ static void vnet_clean_timer_expire(unsigned long port0)
struct vnet_port *port = (struct vnet_port *)port0;
struct sk_buff *freeskbs;
unsigned pending;
- unsigned long flags;
- spin_lock_irqsave(&port->vio.lock, flags);
+ netif_tx_lock(port->vp->dev);
freeskbs = vnet_clean_tx_ring(port, &pending);
- spin_unlock_irqrestore(&port->vio.lock, flags);
+ netif_tx_unlock(port->vp->dev);
vnet_free_skbs(freeskbs);
@@ -862,11 +994,54 @@ static void vnet_clean_timer_expire(unsigned long port0)
del_timer(&port->clean_timer);
}
-static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart,
- int *plen)
+static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
+ struct ldc_trans_cookie *cookies, int ncookies,
+ unsigned int map_perm)
+{
+ int i, nc, err, blen;
+
+ /* header */
+ blen = skb_headlen(skb);
+ if (blen < ETH_ZLEN)
+ blen = ETH_ZLEN;
+ blen += VNET_PACKET_SKIP;
+ blen += 8 - (blen & 7);
+
+ err = ldc_map_single(lp, skb->data-VNET_PACKET_SKIP, blen, cookies,
+ ncookies, map_perm);
+ if (err < 0)
+ return err;
+ nc = err;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+ u8 *vaddr;
+
+ if (nc < ncookies) {
+ vaddr = kmap_atomic(skb_frag_page(f));
+ blen = skb_frag_size(f);
+ blen += 8 - (blen & 7);
+ err = ldc_map_single(lp, vaddr + f->page_offset,
+ blen, cookies + nc, ncookies - nc,
+ map_perm);
+ kunmap_atomic(vaddr);
+ } else {
+ err = -EMSGSIZE;
+ }
+
+ if (err < 0) {
+ ldc_unmap(lp, cookies, nc);
+ return err;
+ }
+ nc += err;
+ }
+ return nc;
+}
+
+static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
{
struct sk_buff *nskb;
- int len, pad;
+ int i, len, pad, docopy;
len = skb->len;
pad = 0;
@@ -876,51 +1051,223 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart,
}
len += VNET_PACKET_SKIP;
pad += 8 - (len & 7);
- len += 8 - (len & 7);
+ /* make sure we have enough cookies and alignment in every frag */
+ docopy = skb_shinfo(skb)->nr_frags >= ncookies;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+
+ docopy |= f->page_offset & 7;
+ }
if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
skb_tailroom(skb) < pad ||
- skb_headroom(skb) < VNET_PACKET_SKIP) {
- nskb = alloc_and_align_skb(skb->dev, skb->len);
+ skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
+ int start = 0, offset;
+ __wsum csum;
+
+ len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
+ nskb = alloc_and_align_skb(skb->dev, len);
+ if (nskb == NULL) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
skb_reserve(nskb, VNET_PACKET_SKIP);
- if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
+
+ nskb->protocol = skb->protocol;
+ offset = skb_mac_header(skb) - skb->data;
+ skb_set_mac_header(nskb, offset);
+ offset = skb_network_header(skb) - skb->data;
+ skb_set_network_header(nskb, offset);
+ offset = skb_transport_header(skb) - skb->data;
+ skb_set_transport_header(nskb, offset);
+
+ offset = 0;
+ nskb->csum_offset = skb->csum_offset;
+ nskb->ip_summed = skb->ip_summed;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ start = skb_checksum_start_offset(skb);
+ if (start) {
+ struct iphdr *iph = ip_hdr(nskb);
+ int offset = start + nskb->csum_offset;
+
+ if (skb_copy_bits(skb, 0, nskb->data, start)) {
+ dev_kfree_skb(nskb);
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ *(__sum16 *)(skb->data + offset) = 0;
+ csum = skb_copy_and_csum_bits(skb, start,
+ nskb->data + start,
+ skb->len - start, 0);
+ if (iph->protocol == IPPROTO_TCP ||
+ iph->protocol == IPPROTO_UDP) {
+ csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - start,
+ iph->protocol, csum);
+ }
+ *(__sum16 *)(nskb->data + offset) = csum;
+
+ nskb->ip_summed = CHECKSUM_NONE;
+ } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
dev_kfree_skb(nskb);
dev_kfree_skb(skb);
return NULL;
}
(void)skb_put(nskb, skb->len);
+ if (skb_is_gso(skb)) {
+ skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
+ skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
+ }
dev_kfree_skb(skb);
skb = nskb;
}
-
- *pstart = skb->data - VNET_PACKET_SKIP;
- *plen = len;
return skb;
}
+static u16
+vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct vnet *vp = netdev_priv(dev);
+ struct vnet_port *port = __tx_port_find(vp, skb);
+
+ if (port == NULL)
+ return 0;
+ return port->q_index;
+}
+
+static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
+{
+ struct net_device *dev = port->vp->dev;
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct sk_buff *segs;
+ int maclen, datalen;
+ int status;
+ int gso_size, gso_type, gso_segs;
+ int hlen = skb_transport_header(skb) - skb_mac_header(skb);
+ int proto = IPPROTO_IP;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ proto = ip_hdr(skb)->protocol;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ proto = ipv6_hdr(skb)->nexthdr;
+
+ if (proto == IPPROTO_TCP)
+ hlen += tcp_hdr(skb)->doff * 4;
+ else if (proto == IPPROTO_UDP)
+ hlen += sizeof(struct udphdr);
+ else {
+ pr_err("vnet_handle_offloads GSO with unknown transport "
+ "protocol %d tproto %d\n", skb->protocol, proto);
+ hlen = 128; /* XXX */
+ }
+ datalen = port->tsolen - hlen;
+
+ gso_size = skb_shinfo(skb)->gso_size;
+ gso_type = skb_shinfo(skb)->gso_type;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (port->tso && gso_size < datalen)
+ gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);
+
+ if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(dev, port->q_index);
+ netif_tx_stop_queue(txq);
+ if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
+ return NETDEV_TX_BUSY;
+ netif_tx_wake_queue(txq);
+ }
+
+ maclen = skb_network_header(skb) - skb_mac_header(skb);
+ skb_pull(skb, maclen);
+
+ if (port->tso && gso_size < datalen) {
+ /* segment to TSO size */
+ skb_shinfo(skb)->gso_size = datalen;
+ skb_shinfo(skb)->gso_segs = gso_segs;
+
+ segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
+
+ /* restore gso_size & gso_segs */
+ skb_shinfo(skb)->gso_size = gso_size;
+ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len - hlen,
+ gso_size);
+ } else
+ segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
+ if (IS_ERR(segs)) {
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ skb_push(skb, maclen);
+ skb_reset_mac_header(skb);
+
+ status = 0;
+ while (segs) {
+ struct sk_buff *curr = segs;
+
+ segs = segs->next;
+ curr->next = NULL;
+ if (port->tso && curr->len > dev->mtu) {
+ skb_shinfo(curr)->gso_size = gso_size;
+ skb_shinfo(curr)->gso_type = gso_type;
+ skb_shinfo(curr)->gso_segs =
+ DIV_ROUND_UP(curr->len - hlen, gso_size);
+ } else
+ skb_shinfo(curr)->gso_size = 0;
+
+ skb_push(curr, maclen);
+ skb_reset_mac_header(curr);
+ memcpy(skb_mac_header(curr), skb_mac_header(skb),
+ maclen);
+ curr->csum_start = skb_transport_header(curr) - curr->head;
+ if (ip_hdr(curr)->protocol == IPPROTO_TCP)
+ curr->csum_offset = offsetof(struct tcphdr, check);
+ else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
+ curr->csum_offset = offsetof(struct udphdr, check);
+
+ if (!(status & NETDEV_TX_MASK))
+ status = vnet_start_xmit(curr, dev);
+ if (status & NETDEV_TX_MASK)
+ dev_kfree_skb_any(curr);
+ }
+
+ if (!(status & NETDEV_TX_MASK))
+ dev_kfree_skb_any(skb);
+ return status;
+}
+
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vnet *vp = netdev_priv(dev);
- struct vnet_port *port = tx_port_find(vp, skb);
+ struct vnet_port *port = NULL;
struct vio_dring_state *dr;
struct vio_net_desc *d;
- unsigned long flags;
unsigned int len;
struct sk_buff *freeskbs = NULL;
int i, err, txi;
- void *start = NULL;
- int nlen = 0;
unsigned pending = 0;
+ struct netdev_queue *txq;
- if (unlikely(!port))
+ rcu_read_lock();
+ port = __tx_port_find(vp, skb);
+ if (unlikely(!port)) {
+ rcu_read_unlock();
goto out_dropped;
+ }
- skb = vnet_skb_shape(skb, &start, &nlen);
-
- if (unlikely(!skb))
- goto out_dropped;
+ if (skb_is_gso(skb) && skb->len > port->tsolen) {
+ err = vnet_handle_offloads(port, skb);
+ rcu_read_unlock();
+ return err;
+ }
- if (skb->len > port->rmtu) {
+ if (!skb_is_gso(skb) && skb->len > port->rmtu) {
unsigned long localmtu = port->rmtu - ETH_HLEN;
if (vio_version_after_eq(&port->vio, 1, 3))
@@ -937,6 +1284,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
fl4.saddr = ip_hdr(skb)->saddr;
rt = ip_route_output_key(dev_net(dev), &fl4);
+ rcu_read_unlock();
if (!IS_ERR(rt)) {
skb_dst_set(skb, &rt->dst);
icmp_send(skb, ICMP_DEST_UNREACH,
@@ -951,18 +1299,26 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto out_dropped;
}
- spin_lock_irqsave(&port->vio.lock, flags);
+ skb = vnet_skb_shape(skb, 2);
+
+ if (unlikely(!skb))
+ goto out_dropped;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ vnet_fullcsum(skb);
dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ i = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, i);
if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
- if (!netif_queue_stopped(dev)) {
- netif_stop_queue(dev);
+ if (!netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
dev->stats.tx_errors++;
}
- spin_unlock_irqrestore(&port->vio.lock, flags);
+ rcu_read_unlock();
return NETDEV_TX_BUSY;
}
@@ -978,16 +1334,15 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (len < ETH_ZLEN)
len = ETH_ZLEN;
- port->tx_bufs[txi].skb = skb;
- skb = NULL;
-
- err = ldc_map_single(port->vio.lp, start, nlen,
- port->tx_bufs[txi].cookies, VNET_MAXCOOKIES,
- (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
+ err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
+ (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
if (err < 0) {
netdev_info(dev, "tx buffer map error %d\n", err);
- goto out_dropped_unlock;
+ goto out_dropped;
}
+
+ port->tx_bufs[txi].skb = skb;
+ skb = NULL;
port->tx_bufs[txi].ncookies = err;
/* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
@@ -1003,6 +1358,21 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
d->ncookies = port->tx_bufs[txi].ncookies;
for (i = 0; i < d->ncookies; i++)
d->cookies[i] = port->tx_bufs[txi].cookies[i];
+ if (vio_version_after_eq(&port->vio, 1, 7)) {
+ struct vio_net_dext *dext = vio_net_ext(d);
+
+ memset(dext, 0, sizeof(*dext));
+ if (skb_is_gso(port->tx_bufs[txi].skb)) {
+ dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
+ ->gso_size;
+ dext->flags |= VNET_PKT_IPV4_LSO;
+ }
+ if (vio_version_after_eq(&port->vio, 1, 8) &&
+ !port->switch_port) {
+ dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
+ dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
+ }
+ }
/* This has to be a non-SMP write barrier because we are writing
* to memory which is shared with the peer LDOM.
@@ -1039,7 +1409,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_info(dev, "TX trigger error %d\n", err);
d->hdr.state = VIO_DESC_FREE;
dev->stats.tx_carrier_errors++;
- goto out_dropped_unlock;
+ goto out_dropped;
}
ldc_start_done:
@@ -1050,31 +1420,29 @@ ldc_start_done:
dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
- netif_stop_queue(dev);
+ netif_tx_stop_queue(txq);
if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
- netif_wake_queue(dev);
+ netif_tx_wake_queue(txq);
}
- spin_unlock_irqrestore(&port->vio.lock, flags);
+ (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
+ rcu_read_unlock();
vnet_free_skbs(freeskbs);
- (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
-
return NETDEV_TX_OK;
-out_dropped_unlock:
- spin_unlock_irqrestore(&port->vio.lock, flags);
-
out_dropped:
- if (skb)
- dev_kfree_skb(skb);
- vnet_free_skbs(freeskbs);
if (pending)
(void)mod_timer(&port->clean_timer,
jiffies + VNET_CLEAN_TIMEOUT);
else if (port)
del_timer(&port->clean_timer);
+ if (port)
+ rcu_read_unlock();
+ if (skb)
+ dev_kfree_skb(skb);
+ vnet_free_skbs(freeskbs);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -1087,14 +1455,14 @@ static void vnet_tx_timeout(struct net_device *dev)
static int vnet_open(struct net_device *dev)
{
netif_carrier_on(dev);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
static int vnet_close(struct net_device *dev)
{
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
return 0;
@@ -1204,18 +1572,17 @@ static void vnet_set_rx_mode(struct net_device *dev)
{
struct vnet *vp = netdev_priv(dev);
struct vnet_port *port;
- unsigned long flags;
- spin_lock_irqsave(&vp->lock, flags);
- if (!list_empty(&vp->port_list)) {
- port = list_entry(vp->port_list.next, struct vnet_port, list);
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &vp->port_list, list) {
if (port->switch_port) {
__update_mc_list(vp, dev);
__send_mc_list(vp, port);
+ break;
}
}
- spin_unlock_irqrestore(&vp->lock, flags);
+ rcu_read_unlock();
}
static int vnet_change_mtu(struct net_device *dev, int new_mtu)
@@ -1295,18 +1662,20 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
}
}
-static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
+static int vnet_port_alloc_tx_ring(struct vnet_port *port)
{
struct vio_dring_state *dr;
- unsigned long len;
+ unsigned long len, elen;
int i, err, ncookies;
void *dring;
dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- len = (VNET_TX_RING_SIZE *
- (sizeof(struct vio_net_desc) +
- (sizeof(struct ldc_trans_cookie) * 2)));
+ elen = sizeof(struct vio_net_desc) +
+ sizeof(struct ldc_trans_cookie) * 2;
+ if (vio_version_after_eq(&port->vio, 1, 7))
+ elen += sizeof(struct vio_net_dext);
+ len = VNET_TX_RING_SIZE * elen;
ncookies = VIO_MAX_RING_COOKIES;
dring = ldc_alloc_exp_dring(port->vio.lp, len,
@@ -1320,8 +1689,7 @@ static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
}
dr->base = dring;
- dr->entry_size = (sizeof(struct vio_net_desc) +
- (sizeof(struct ldc_trans_cookie) * 2));
+ dr->entry_size = elen;
dr->num_entries = VNET_TX_RING_SIZE;
dr->prod = dr->cons = 0;
port->start_cons = true; /* need an initial trigger */
@@ -1342,6 +1710,21 @@ err_out:
return err;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void vnet_poll_controller(struct net_device *dev)
+{
+ struct vnet *vp = netdev_priv(dev);
+ struct vnet_port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ if (!list_empty(&vp->port_list)) {
+ port = list_entry(vp->port_list.next, struct vnet_port, list);
+ napi_schedule(&port->napi);
+ }
+ spin_unlock_irqrestore(&vp->lock, flags);
+}
+#endif
static LIST_HEAD(vnet_list);
static DEFINE_MUTEX(vnet_list_mutex);
@@ -1354,6 +1737,10 @@ static const struct net_device_ops vnet_ops = {
.ndo_tx_timeout = vnet_tx_timeout,
.ndo_change_mtu = vnet_change_mtu,
.ndo_start_xmit = vnet_start_xmit,
+ .ndo_select_queue = vnet_select_queue,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = vnet_poll_controller,
+#endif
};
static struct vnet *vnet_new(const u64 *local_mac)
@@ -1362,7 +1749,7 @@ static struct vnet *vnet_new(const u64 *local_mac)
struct vnet *vp;
int err, i;
- dev = alloc_etherdev(sizeof(*vp));
+ dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1);
if (!dev)
return ERR_PTR(-ENOMEM);
dev->needed_headroom = VNET_PACKET_SKIP + 8;
@@ -1374,7 +1761,6 @@ static struct vnet *vnet_new(const u64 *local_mac)
vp = netdev_priv(dev);
spin_lock_init(&vp->lock);
- tasklet_init(&vp->vnet_tx_wakeup, maybe_tx_wakeup, (unsigned long)vp);
vp->dev = dev;
INIT_LIST_HEAD(&vp->port_list);
@@ -1387,6 +1773,10 @@ static struct vnet *vnet_new(const u64 *local_mac)
dev->ethtool_ops = &vnet_ethtool_ops;
dev->watchdog_timeo = VNET_TX_TIMEOUT;
+ dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
+ NETIF_F_HW_CSUM | NETIF_F_SG;
+ dev->features = dev->hw_features;
+
err = register_netdev(dev);
if (err) {
pr_err("Cannot register net device, aborting\n");
@@ -1434,7 +1824,6 @@ static void vnet_cleanup(void)
vp = list_first_entry(&vnet_list, struct vnet, list);
list_del(&vp->list);
dev = vp->dev;
- tasklet_kill(&vp->vnet_tx_wakeup);
/* vio_unregister_driver() should have cleaned up port_list */
BUG_ON(!list_empty(&vp->port_list));
unregister_netdev(dev);
@@ -1489,6 +1878,25 @@ static void print_version(void)
const char *remote_macaddr_prop = "remote-mac-address";
+static void
+vnet_port_add_txq(struct vnet_port *port)
+{
+ struct vnet *vp = port->vp;
+ int n;
+
+ n = vp->nports++;
+ n = n & (VNET_MAX_TXQS - 1);
+ port->q_index = n;
+ netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index));
+}
+
+static void
+vnet_port_rm_txq(struct vnet_port *port)
+{
+ port->vp->nports--;
+ netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index));
+}
+
static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct mdesc_handle *hp;
@@ -1536,9 +1944,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (err)
goto err_out_free_port;
- err = vnet_port_alloc_tx_bufs(port);
- if (err)
- goto err_out_free_ldc;
+ netif_napi_add(port->vp->dev, &port->napi, vnet_poll, NAPI_POLL_WEIGHT);
INIT_HLIST_NODE(&port->hash);
INIT_LIST_HEAD(&port->list);
@@ -1547,13 +1953,17 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
switch_port = 1;
port->switch_port = switch_port;
+ port->tso = true;
+ port->tsolen = 0;
spin_lock_irqsave(&vp->lock, flags);
if (switch_port)
- list_add(&port->list, &vp->port_list);
+ list_add_rcu(&port->list, &vp->port_list);
else
- list_add_tail(&port->list, &vp->port_list);
- hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
+ list_add_tail_rcu(&port->list, &vp->port_list);
+ hlist_add_head_rcu(&port->hash,
+ &vp->port_hash[vnet_hashfn(port->raddr)]);
+ vnet_port_add_txq(port);
spin_unlock_irqrestore(&vp->lock, flags);
dev_set_drvdata(&vdev->dev, port);
@@ -1564,15 +1974,13 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
setup_timer(&port->clean_timer, vnet_clean_timer_expire,
(unsigned long)port);
+ napi_enable(&port->napi);
vio_port_up(&port->vio);
mdesc_release(hp);
return 0;
-err_out_free_ldc:
- vio_ldc_free(&port->vio);
-
err_out_free_port:
kfree(port);
@@ -1586,17 +1994,18 @@ static int vnet_port_remove(struct vio_dev *vdev)
struct vnet_port *port = dev_get_drvdata(&vdev->dev);
if (port) {
- struct vnet *vp = port->vp;
- unsigned long flags;
del_timer_sync(&port->vio.timer);
- del_timer_sync(&port->clean_timer);
- spin_lock_irqsave(&vp->lock, flags);
- list_del(&port->list);
- hlist_del(&port->hash);
- spin_unlock_irqrestore(&vp->lock, flags);
+ napi_disable(&port->napi);
+
+ list_del_rcu(&port->list);
+ hlist_del_rcu(&port->hash);
+ synchronize_rcu();
+ del_timer_sync(&port->clean_timer);
+ vnet_port_rm_txq(port);
+ netif_napi_del(&port->napi);
vnet_port_free_tx_bufs(port);
vio_ldc_free(&port->vio);
diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h
index c91104542619..01ca78191683 100644
--- a/drivers/net/ethernet/sun/sunvnet.h
+++ b/drivers/net/ethernet/sun/sunvnet.h
@@ -20,6 +20,9 @@
#define VNET_TX_RING_SIZE 512
#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4)
+#define VNET_MINTSO 2048 /* VIO protocol's minimum TSO len */
+#define VNET_MAXTSO 65535 /* VIO protocol's maximum TSO len */
+
/* VNET packets are sent in buffers with the first 6 bytes skipped
* so that after the ethernet header the IPv4/IPv6 headers are aligned
* properly.
@@ -40,8 +43,9 @@ struct vnet_port {
struct hlist_node hash;
u8 raddr[ETH_ALEN];
- u8 switch_port;
- u8 __pad;
+ unsigned switch_port:1;
+ unsigned tso:1;
+ unsigned __pad:14;
struct vnet *vp;
@@ -56,6 +60,13 @@ struct vnet_port {
struct timer_list clean_timer;
u64 rmtu;
+ u16 tsolen;
+
+ struct napi_struct napi;
+ u32 napi_stop_idx;
+ bool napi_resume;
+ int rx_event;
+ u16 q_index;
};
static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
@@ -97,7 +108,7 @@ struct vnet {
struct list_head list;
u64 local_mac;
- struct tasklet_struct vnet_tx_wakeup;
+ int nports;
};
#endif /* _SUNVNET_H */