-rw-r--r--	.mailmap                 |   3
-rw-r--r--	MAINTAINERS              |   6
-rw-r--r--	drivers/net/virtio_net.c | 155
-rw-r--r--	drivers/nfc/pn533/usb.c  |   1
-rw-r--r--	net/ipv4/tcp_output.c    |   2
5 files changed, 93 insertions, 74 deletions
diff --git a/.mailmap b/.mailmap
index 66754ca6656d..438d79c27717 100644
--- a/.mailmap
+++ b/.mailmap
@@ -210,6 +210,9 @@ Jens Axboe <axboe@suse.de>
 Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
 Jernej Skrabec <jernej.skrabec@gmail.com> <jernej.skrabec@siol.net>
 Jessica Zhang <quic_jesszhan@quicinc.com> <jesszhan@codeaurora.org>
+Jiri Pirko <jiri@resnulli.us> <jiri@nvidia.com>
+Jiri Pirko <jiri@resnulli.us> <jiri@mellanox.com>
+Jiri Pirko <jiri@resnulli.us> <jpirko@redhat.com>
 Jiri Slaby <jirislaby@kernel.org> <jirislaby@gmail.com>
 Jiri Slaby <jirislaby@kernel.org> <jslaby@novell.com>
 Jiri Slaby <jirislaby@kernel.org> <jslaby@suse.com>
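
(Each .mailmap line above follows git's standard mapping syntax, `Canonical Name <canonical@address> <address-as-committed>`: commits authored under the old nvidia.com, mellanox.com, and redhat.com addresses will be attributed to jiri@resnulli.us by mailmap-aware tools such as `git shortlog` and `git log --use-mailmap`.)
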
diff --git a/MAINTAINERS b/MAINTAINERS
index 8d5bc223f305..d86c7807aa20 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5971,7 +5971,7 @@ F:	include/linux/dm-*.h
 F:	include/uapi/linux/dm-*.h
 
 DEVLINK
-M:	Jiri Pirko <jiri@nvidia.com>
+M:	Jiri Pirko <jiri@resnulli.us>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	Documentation/networking/devlink
@@ -15079,7 +15079,7 @@ F:	Documentation/hwmon/nzxt-smart2.rst
 F:	drivers/hwmon/nzxt-smart2.c
 
 OBJAGG
-M:	Jiri Pirko <jiri@nvidia.com>
+M:	Jiri Pirko <jiri@resnulli.us>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	include/linux/objagg.h
@@ -15853,7 +15853,7 @@ F:	drivers/video/logo/logo_parisc*
 F:	include/linux/hp_sdc.h
 
 PARMAN
-M:	Jiri Pirko <jiri@nvidia.com>
+M:	Jiri Pirko <jiri@resnulli.us>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	include/linux/parman.h
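
(For orientation: in a MAINTAINERS entry, M: names a maintainer and address, L: the subsystem mailing list, S: the support status, and F: the file patterns the entry covers. Only the M: lines change in the three entries above, matching the .mailmap update.)
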
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fb5e68ed3ec2..1a309cfb4976 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -545,6 +545,87 @@ ok:
 	return skb;
 }
 
+static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+{
+	unsigned int len;
+	unsigned int packets = 0;
+	unsigned int bytes = 0;
+	void *ptr;
+
+	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+		if (likely(!is_xdp_frame(ptr))) {
+			struct sk_buff *skb = ptr;
+
+			pr_debug("Sent skb %p\n", skb);
+
+			bytes += skb->len;
+			napi_consume_skb(skb, in_napi);
+		} else {
+			struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+			bytes += xdp_get_frame_len(frame);
+			xdp_return_frame(frame);
+		}
+		packets++;
+	}
+
+	/* Avoid overhead when no packets have been processed
+	 * happens when called speculatively from start_xmit.
+	 */
+	if (!packets)
+		return;
+
+	u64_stats_update_begin(&sq->stats.syncp);
+	sq->stats.bytes += bytes;
+	sq->stats.packets += packets;
+	u64_stats_update_end(&sq->stats.syncp);
+}
+
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+{
+	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+		return false;
+	else if (q < vi->curr_queue_pairs)
+		return true;
+	else
+		return false;
+}
+
+static void check_sq_full_and_disable(struct virtnet_info *vi,
+				      struct net_device *dev,
+				      struct send_queue *sq)
+{
+	bool use_napi = sq->napi.weight;
+	int qnum;
+
+	qnum = sq - vi->sq;
+
+	/* If running out of space, stop queue to avoid getting packets that we
+	 * are then unable to transmit.
+	 * An alternative would be to force queuing layer to requeue the skb by
+	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
+	 * returned in a normal path of operation: it means that driver is not
+	 * maintaining the TX queue stop/start state properly, and causes
+	 * the stack to do a non-trivial amount of useless work.
+	 * Since most packets only take 1 or 2 ring slots, stopping the queue
+	 * early means 16 slots are typically wasted.
+	 */
+	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
+		netif_stop_subqueue(dev, qnum);
+		if (use_napi) {
+			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
+				virtqueue_napi_schedule(&sq->napi, sq->vq);
+		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+			/* More just got used, free them then recheck. */
+			free_old_xmit_skbs(sq, false);
+			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
+				netif_start_subqueue(dev, qnum);
+				virtqueue_disable_cb(sq->vq);
+			}
+		}
+	}
+}
+
 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
 				  struct send_queue *sq,
 				  struct xdp_frame *xdpf)
@@ -686,6 +767,9 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	}
 	ret = nxmit;
 
+	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
+		check_sq_full_and_disable(vi, dev, sq);
+
 	if (flags & XDP_XMIT_FLUSH) {
 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
 			kicks = 1;
@@ -1714,52 +1798,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 	return stats.packets;
 }
 
-static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
-{
-	unsigned int len;
-	unsigned int packets = 0;
-	unsigned int bytes = 0;
-	void *ptr;
-
-	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-		if (likely(!is_xdp_frame(ptr))) {
-			struct sk_buff *skb = ptr;
-
-			pr_debug("Sent skb %p\n", skb);
-
-			bytes += skb->len;
-			napi_consume_skb(skb, in_napi);
-		} else {
-			struct xdp_frame *frame = ptr_to_xdp(ptr);
-
-			bytes += xdp_get_frame_len(frame);
-			xdp_return_frame(frame);
-		}
-		packets++;
-	}
-
-	/* Avoid overhead when no packets have been processed
-	 * happens when called speculatively from start_xmit.
-	 */
-	if (!packets)
-		return;
-
-	u64_stats_update_begin(&sq->stats.syncp);
-	sq->stats.bytes += bytes;
-	sq->stats.packets += packets;
-	u64_stats_update_end(&sq->stats.syncp);
-}
-
-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-{
-	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
-		return false;
-	else if (q < vi->curr_queue_pairs)
-		return true;
-	else
-		return false;
-}
-
 static void virtnet_poll_cleantx(struct receive_queue *rq)
 {
 	struct virtnet_info *vi = rq->vq->vdev->priv;
@@ -1989,30 +2027,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 		nf_reset_ct(skb);
 	}
 
-	/* If running out of space, stop queue to avoid getting packets that we
-	 * are then unable to transmit.
-	 * An alternative would be to force queuing layer to requeue the skb by
-	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
-	 * returned in a normal path of operation: it means that driver is not
-	 * maintaining the TX queue stop/start state properly, and causes
-	 * the stack to do a non-trivial amount of useless work.
-	 * Since most packets only take 1 or 2 ring slots, stopping the queue
-	 * early means 16 slots are typically wasted.
-	 */
-	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
-		netif_stop_subqueue(dev, qnum);
-		if (use_napi) {
-			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
-				virtqueue_napi_schedule(&sq->napi, sq->vq);
-		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
-			/* More just got used, free them then recheck. */
-			free_old_xmit_skbs(sq, false);
-			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
-				netif_start_subqueue(dev, qnum);
-				virtqueue_disable_cb(sq->vq);
-			}
-		}
-	}
+	check_sq_full_and_disable(vi, dev, sq);
 
 	if (kick || netif_xmit_stopped(txq)) {
 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
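
Taken together, the virtio_net changes move free_old_xmit_skbs() and is_xdp_raw_buffer_queue() earlier in the file and factor the ring-full handling out of start_xmit() into check_sq_full_and_disable(), so the XDP transmit path (virtnet_xdp_xmit()) can now perform the same stop-queue bookkeeping. For orientation, here is a minimal sketch of the generic stop/recheck/restart idiom the helper implements; my_ring_free_slots() and my_reap_completions() are hypothetical stand-ins for driver-specific ring accounting, not virtio_net symbols:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hypothetical helpers standing in for driver-specific ring code. */
	unsigned int my_ring_free_slots(struct net_device *dev);
	void my_reap_completions(struct net_device *dev);

	static void my_check_ring_full(struct net_device *dev, u16 qnum)
	{
		/* A worst-case packet needs one slot for its linear part plus
		 * MAX_SKB_FRAGS fragment slots; stop the queue early instead
		 * of returning NETDEV_TX_BUSY from ndo_start_xmit.
		 */
		if (my_ring_free_slots(dev) < 2 + MAX_SKB_FRAGS) {
			netif_stop_subqueue(dev, qnum);

			/* Completions may have raced with the check: reclaim
			 * finished descriptors and restart the queue if enough
			 * room opened up in the meantime.
			 */
			my_reap_completions(dev);
			if (my_ring_free_slots(dev) >= 2 + MAX_SKB_FRAGS)
				netif_start_subqueue(dev, qnum);
		}
	}

In the driver itself the recheck is interleaved with virtqueue_enable_cb_delayed(): in NAPI mode a poll is scheduled to do the reclaim, and in non-NAPI mode the queue is restarted only after free_old_xmit_skbs() has run, as the check_sq_full_and_disable() hunk above shows.
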
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index ed9c5e2cf3ad..a187f0e0b0f7 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -175,6 +175,7 @@ static int pn533_usb_send_frame(struct pn533 *dev,
 	print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
 			     out->data, out->len, false);
 
+	arg.phy = phy;
 	init_completion(&arg.done);
 	cntx = phy->out_urb->context;
 	phy->out_urb->context = &arg;
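
The pn533 fix is a single initialization: &arg is installed as the out_urb's completion context just below, so arg.phy must be set before the URB is submitted. Presumably the completion handler reads that field back; leaving it unset would hand the handler an uninitialized pointer.
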
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 71d01cf3c13e..ba839e441450 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3605,7 +3605,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
 	tcp_options_write(th, NULL, &opts);
 	th->doff = (tcp_header_size >> 2);
-	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
+	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
 
 #ifdef CONFIG_TCP_MD5SIG
 	/* Okay, we have all we need - do the md5 hash if needed */
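
On the tcp_output.c change: by kernel convention the __-prefixed SNMP counter macros use the non-preempt-safe per-CPU increment and may only be used with preemption (or BH) already disabled, while the plain forms are safe from any context. Switching tcp_make_synack() to TCP_INC_STATS() therefore suggests the function can be reached from a context where that precondition does not hold. A paraphrase of the relevant definitions (check include/net/tcp.h and include/net/snmp.h in the tree for the exact text):

	/* Paraphrased from include/net/tcp.h and include/net/snmp.h. */
	#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
	#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)

	#define SNMP_INC_STATS(mib, field)	this_cpu_inc(mib->mibs[field])	/* safe in any context */
	#define __SNMP_INC_STATS(mib, field)	__this_cpu_inc(mib->mibs[field])	/* caller disables preemption/BH */
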