Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf')
 drivers/net/ethernet/intel/ixgbevf/Makefile       |    1
 drivers/net/ethernet/intel/ixgbevf/defines.h      |    1
 drivers/net/ethernet/intel/ixgbevf/ethtool.c      |  116
 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h      |  101
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 1214
 drivers/net/ethernet/intel/ixgbevf/mbx.h          |    1
 drivers/net/ethernet/intel/ixgbevf/regs.h         |    1
 drivers/net/ethernet/intel/ixgbevf/vf.h           |    1
 8 files changed, 1042 insertions(+), 394 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile
index 4ce4c97ef5ad..bb47814cfa90 100644
--- a/drivers/net/ethernet/intel/ixgbevf/Makefile
+++ b/drivers/net/ethernet/intel/ixgbevf/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
################################################################################
#
# Intel 82599 Virtual Function driver
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 8617cae2f801..71c828842b11 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************
Intel 82599 Virtual Function driver
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 4400e49090b4..8e7d6c6f5c92 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2018 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -82,6 +82,7 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
#define IXGBEVF_QUEUE_STATS_LEN ( \
(((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
+ ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_xdp_queues + \
((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
(sizeof(struct ixgbevf_stats) / sizeof(u64)))
#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)
@@ -94,6 +95,13 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+static const char ixgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define IXGBEVF_PRIV_FLAGS_LEGACY_RX BIT(0)
+ "legacy-rx",
+};
+
+#define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings)
+
static int ixgbevf_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
@@ -241,6 +249,8 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
+
+ drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
}
static void ixgbevf_get_ringparam(struct net_device *netdev,
@@ -260,7 +270,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
u32 new_rx_count, new_tx_count;
- int i, err = 0;
+ int i, j, err = 0;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
@@ -284,15 +294,19 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i]->count = new_tx_count;
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ adapter->xdp_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
+ adapter->xdp_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
goto clear_reset;
}
if (new_tx_count != adapter->tx_ring_count) {
- tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
+ tx_ring = vmalloc((adapter->num_tx_queues +
+ adapter->num_xdp_queues) * sizeof(*tx_ring));
if (!tx_ring) {
err = -ENOMEM;
goto clear_reset;
@@ -315,6 +329,24 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
goto clear_reset;
}
}
+
+ for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
+ /* clone ring and setup updated count */
+ tx_ring[i] = *adapter->xdp_ring[j];
+ tx_ring[i].count = new_tx_count;
+ err = ixgbevf_setup_tx_resources(&tx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_tx_resources(&tx_ring[i]);
+ }
+
+ vfree(tx_ring);
+ tx_ring = NULL;
+
+ goto clear_reset;
+ }
+ }
}
if (new_rx_count != adapter->rx_ring_count) {
@@ -327,8 +359,13 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
for (i = 0; i < adapter->num_rx_queues; i++) {
/* clone ring and setup updated count */
rx_ring[i] = *adapter->rx_ring[i];
+
+ /* Clear copied XDP RX-queue info */
+ memset(&rx_ring[i].xdp_rxq, 0,
+ sizeof(rx_ring[i].xdp_rxq));
+
rx_ring[i].count = new_rx_count;
- err = ixgbevf_setup_rx_resources(&rx_ring[i]);
+ err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
if (err) {
while (i) {
i--;
@@ -354,6 +391,12 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
}
adapter->tx_ring_count = new_tx_count;
+ for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
+ ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
+ *adapter->xdp_ring[j] = tx_ring[i];
+ }
+ adapter->xdp_ring_count = new_tx_count;
+
vfree(tx_ring);
tx_ring = NULL;
}
@@ -376,7 +419,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
clear_reset:
/* free Tx resources if Rx error is encountered */
if (tx_ring) {
- for (i = 0; i < adapter->num_tx_queues; i++)
+ for (i = 0;
+ i < adapter->num_tx_queues + adapter->num_xdp_queues; i++)
ixgbevf_free_tx_resources(&tx_ring[i]);
vfree(tx_ring);
}
@@ -392,6 +436,8 @@ static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
return IXGBEVF_TEST_LEN;
case ETH_SS_STATS:
return IXGBEVF_STATS_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return IXGBEVF_PRIV_FLAGS_STR_LEN;
default:
return -EINVAL;
}
@@ -446,6 +492,23 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
i += 2;
}
+ /* populate XDP queue data */
+ for (j = 0; j < adapter->num_xdp_queues; j++) {
+ ring = adapter->xdp_ring[j];
+ if (!ring) {
+ data[i++] = 0;
+ data[i++] = 0;
+ continue;
+ }
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ data[i] = ring->stats.packets;
+ data[i + 1] = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ i += 2;
+ }
+
/* populate Rx queue data */
for (j = 0; j < adapter->num_rx_queues; j++) {
ring = adapter->rx_ring[j];
@@ -489,6 +552,12 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ sprintf(p, "xdp_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "xdp_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
for (i = 0; i < adapter->num_rx_queues; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
@@ -496,6 +565,10 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
p += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, ixgbevf_priv_flags_strings,
+ IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ break;
}
}
@@ -888,6 +961,37 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return err;
}
+static u32 ixgbevf_get_priv_flags(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
+ priv_flags |= IXGBEVF_PRIV_FLAGS_LEGACY_RX;
+
+ return priv_flags;
+}
+
+static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ unsigned int flags = adapter->flags;
+
+ flags &= ~IXGBEVF_FLAGS_LEGACY_RX;
+ if (priv_flags & IXGBEVF_PRIV_FLAGS_LEGACY_RX)
+ flags |= IXGBEVF_FLAGS_LEGACY_RX;
+
+ if (flags != adapter->flags) {
+ adapter->flags = flags;
+
+ /* reset interface to repopulate queues */
+ if (netif_running(netdev))
+ ixgbevf_reinit_locked(adapter);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_drvinfo = ixgbevf_get_drvinfo,
.get_regs_len = ixgbevf_get_regs_len,
@@ -909,6 +1013,8 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_rxfh_key_size = ixgbevf_get_rxfh_key_size,
.get_rxfh = ixgbevf_get_rxfh,
.get_link_ksettings = ixgbevf_get_link_ksettings,
+ .get_priv_flags = ixgbevf_get_priv_flags,
+ .set_priv_flags = ixgbevf_set_priv_flags,
};
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
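
[Note: the new flag plugs into the generic ethtool private-flags interface, so `ethtool --show-priv-flags <dev>` and `ethtool --set-priv-flags <dev> legacy-rx on` drive the two callbacks above with no driver-specific tooling. A minimal user-space sketch of the same round trip over the raw SIOCETHTOOL ioctl — an editorial illustration, not part of the patch; fd is any datagram socket, e.g. socket(AF_INET, SOCK_DGRAM, 0), and bit 0 is assumed to be legacy-rx per IXGBEVF_PRIV_FLAGS_LEGACY_RX:]

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int set_legacy_rx(int fd, const char *dev, int on)
{
	struct ethtool_value eval = { .cmd = ETHTOOL_GPFLAGS };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&eval;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)	/* read current flags */
		return -1;

	eval.cmd = ETHTOOL_SPFLAGS;
	if (on)
		eval.data |= 1U;	/* BIT(0) == "legacy-rx" */
	else
		eval.data &= ~1U;

	return ioctl(fd, SIOCETHTOOL, &ifr);	/* write flags back */
}
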
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index f6952425c87d..447ce1d5e0e3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2018 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -34,6 +35,7 @@
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/u64_stats_sync.h>
+#include <net/xdp.h>
#include "vf.h"
@@ -50,7 +52,11 @@
struct ixgbevf_tx_buffer {
union ixgbe_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ /* XDP uses address ptr on irq_clean */
+ void *data;
+ };
unsigned int bytecount;
unsigned short gso_segs;
__be16 protocol;
@@ -89,20 +95,25 @@ struct ixgbevf_rx_queue_stats {
};
enum ixgbevf_ring_state_t {
+ __IXGBEVF_RX_3K_BUFFER,
+ __IXGBEVF_RX_BUILD_SKB_ENABLED,
__IXGBEVF_TX_DETECT_HANG,
__IXGBEVF_HANG_CHECK_ARMED,
+ __IXGBEVF_TX_XDP_RING,
};
-#define check_for_tx_hang(ring) \
- test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-#define set_check_for_tx_hang(ring) \
- set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-#define clear_check_for_tx_hang(ring) \
- clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_xdp(ring) \
+ test_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state)
+#define set_ring_xdp(ring) \
+ set_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state)
+#define clear_ring_xdp(ring) \
+ clear_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state)
struct ixgbevf_ring {
struct ixgbevf_ring *next;
+ struct ixgbevf_q_vector *q_vector; /* backpointer to q_vector */
struct net_device *netdev;
+ struct bpf_prog *xdp_prog;
struct device *dev;
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys. address of descriptor ring */
@@ -123,7 +134,7 @@ struct ixgbevf_ring {
struct ixgbevf_tx_queue_stats tx_stats;
struct ixgbevf_rx_queue_stats rx_stats;
};
-
+ struct xdp_rxq_info xdp_rxq;
u64 hw_csum_rx_error;
u8 __iomem *tail;
struct sk_buff *skb;
@@ -133,13 +144,14 @@ struct ixgbevf_ring {
*/
u16 reg_idx;
int queue_index; /* needed for multiqueue queue management */
-};
+} ____cacheline_internodealigned_in_smp;
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
+#define MAX_XDP_QUEUES IXGBE_VF_MAX_TX_QUEUES
#define IXGBEVF_MAX_RSS_QUEUES 2
#define IXGBEVF_82599_RETA_SIZE 128 /* 128 entries */
#define IXGBEVF_X550_VFRETA_SIZE 64 /* 64 entries */
@@ -156,12 +168,20 @@ struct ixgbevf_ring {
/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
#define IXGBEVF_RXBUFFER_2048 2048
+#define IXGBEVF_RXBUFFER_3072 3072
#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
-#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+#define IXGBEVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#if (PAGE_SIZE < 8192)
+#define IXGBEVF_MAX_FRAME_BUILD_SKB \
+ (SKB_WITH_OVERHEAD(IXGBEVF_RXBUFFER_2048) - IXGBEVF_SKB_PAD)
+#else
+#define IXGBEVF_MAX_FRAME_BUILD_SKB IXGBEVF_RXBUFFER_2048
+#endif
+
#define IXGBE_TX_FLAGS_CSUM BIT(0)
#define IXGBE_TX_FLAGS_VLAN BIT(1)
#define IXGBE_TX_FLAGS_TSO BIT(2)
@@ -170,6 +190,50 @@ struct ixgbevf_ring {
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
+#define ring_uses_large_buffer(ring) \
+ test_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+#define set_ring_uses_large_buffer(ring) \
+ set_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+#define clear_ring_uses_large_buffer(ring) \
+ clear_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+
+#define ring_uses_build_skb(ring) \
+ test_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+#define set_ring_build_skb_enabled(ring) \
+ set_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+#define clear_ring_build_skb_enabled(ring) \
+ clear_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+
+static inline unsigned int ixgbevf_rx_bufsz(struct ixgbevf_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring_uses_large_buffer(ring))
+ return IXGBEVF_RXBUFFER_3072;
+
+ if (ring_uses_build_skb(ring))
+ return IXGBEVF_MAX_FRAME_BUILD_SKB;
+#endif
+ return IXGBEVF_RXBUFFER_2048;
+}
+
+static inline unsigned int ixgbevf_rx_pg_order(struct ixgbevf_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring_uses_large_buffer(ring))
+ return 1;
+#endif
+ return 0;
+}
+
+#define ixgbevf_rx_pg_size(_ring) (PAGE_SIZE << ixgbevf_rx_pg_order(_ring))
+
+#define check_for_tx_hang(ring) \
+ test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+ set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+ clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+
struct ixgbevf_ring_container {
struct ixgbevf_ring *ring; /* pointer to linked list of rings */
unsigned int total_bytes; /* total bytes processed this int */
@@ -194,7 +258,11 @@ struct ixgbevf_q_vector {
u16 itr; /* Interrupt throttle rate written to EITR */
struct napi_struct napi;
struct ixgbevf_ring_container rx, tx;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct ixgbevf_ring ring[0] ____cacheline_internodealigned_in_smp;
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define IXGBEVF_QV_STATE_IDLE 0
@@ -284,6 +352,10 @@ struct ixgbevf_adapter {
u32 eims_enable_mask;
u32 eims_other;
+ /* XDP */
+ int num_xdp_queues;
+ struct ixgbevf_ring *xdp_ring[MAX_XDP_QUEUES];
+
/* TX */
int num_tx_queues;
struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
@@ -304,6 +376,7 @@ struct ixgbevf_adapter {
/* OS defined structs */
struct net_device *netdev;
+ struct bpf_prog *xdp_prog;
struct pci_dev *pdev;
/* structs defined in ixgbe_vf.h */
@@ -317,6 +390,7 @@ struct ixgbevf_adapter {
unsigned long state;
u64 tx_busy;
unsigned int tx_ring_count;
+ unsigned int xdp_ring_count;
unsigned int rx_ring_count;
u8 __iomem *io_addr; /* Mainly for iounmap use */
@@ -331,6 +405,8 @@ struct ixgbevf_adapter {
u32 *rss_key;
u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
+ u32 flags;
+#define IXGBEVF_FLAGS_LEGACY_RX BIT(1)
};
enum ixbgevf_state_t {
@@ -388,7 +464,8 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter);
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
void ixgbevf_reset(struct ixgbevf_adapter *adapter);
void ixgbevf_set_ethtool_ops(struct net_device *netdev);
-int ixgbevf_setup_rx_resources(struct ixgbevf_ring *);
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring);
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *);
void ixgbevf_free_rx_resources(struct ixgbevf_ring *);
void ixgbevf_free_tx_resources(struct ixgbevf_ring *);
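
[Note on the geometry encoded by the helpers above: with pages under 8 KiB, each page is split into two 2 KiB buffers (or one 3 KiB buffer in an order-1 page when __IXGBEVF_RX_3K_BUFFER is set), and buffer reuse works by XOR-flipping the offset between the two halves. A self-contained user-space model of the flip, assuming 4 KiB pages — an editorial sketch:]

#include <assert.h>

int main(void)
{
	unsigned int truesize = 4096 / 2;	/* ixgbevf_rx_pg_size() / 2 */
	unsigned int page_offset = 0;

	page_offset ^= truesize;	/* hand out the second half */
	assert(page_offset == 2048);

	page_offset ^= truesize;	/* and flip back to the first */
	assert(page_offset == 0);
	return 0;
}
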
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9b3d43d28106..3d9033f26eff 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2018 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -50,6 +50,9 @@
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <net/mpls.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/atomic.h>
#include "ixgbevf.h"
@@ -130,6 +133,9 @@ static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
+static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);
+static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
+ struct ixgbevf_rx_buffer *old_buff);
static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
@@ -318,7 +324,10 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
total_packets += tx_buffer->gso_segs;
/* free the skb */
- napi_consume_skb(tx_buffer->skb, napi_budget);
+ if (ring_is_xdp(tx_ring))
+ page_frag_free(tx_buffer->data);
+ else
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -382,7 +391,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
- pr_err("Detected Tx Unit Hang\n"
+ pr_err("Detected Tx Unit Hang%s\n"
" Tx Queue <%d>\n"
" TDH, TDT <%x>, <%x>\n"
" next_to_use <%x>\n"
@@ -392,6 +401,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
" eop_desc->wb.status <%x>\n"
" time_stamp <%lx>\n"
" jiffies <%lx>\n",
+ ring_is_xdp(tx_ring) ? " XDP" : "",
tx_ring->queue_index,
IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
@@ -399,7 +409,9 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
eop_desc, (eop_desc ? eop_desc->wb.status : 0),
tx_ring->tx_buffer_info[i].time_stamp, jiffies);
- netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ if (!ring_is_xdp(tx_ring))
+ netif_stop_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
/* schedule immediate reset if we believe we hung */
ixgbevf_tx_timeout_reset(adapter);
@@ -407,6 +419,9 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
return true;
}
+ if (ring_is_xdp(tx_ring))
+ return !!budget;
+
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
@@ -527,6 +542,51 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
+static
+struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
+ const unsigned int size)
+{
+ struct ixgbevf_rx_buffer *rx_buffer;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ prefetchw(rx_buffer->page);
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+
+ rx_buffer->pagecnt_bias--;
+
+ return rx_buffer;
+}
+
+static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
+ struct ixgbevf_rx_buffer *rx_buffer,
+ struct sk_buff *skb)
+{
+ if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
+ /* hand second half of page back to the ring */
+ ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ if (IS_ERR(skb))
+ /* We are not reusing the buffer so unmap it and free
+ * any references we are holding to it
+ */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ ixgbevf_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBEVF_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+ }
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+}
+
/**
* ixgbevf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
@@ -554,32 +614,38 @@ static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
return true;
}
+static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
+}
+
static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *bi)
{
struct page *page = bi->page;
- dma_addr_t dma = bi->dma;
+ dma_addr_t dma;
/* since we are recycling buffers we should seldom need to alloc */
if (likely(page))
return true;
/* alloc new page for storage */
- page = dev_alloc_page();
+ page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_rx_page_failed++;
return false;
}
/* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ ixgbevf_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_page(page);
+ __free_pages(page, ixgbevf_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_rx_page_failed++;
return false;
@@ -587,7 +653,7 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = ixgbevf_rx_offset(rx_ring);
bi->pagecnt_bias = 1;
rx_ring->rx_stats.alloc_rx_page++;
@@ -621,7 +687,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset,
- IXGBEVF_RX_BUFSZ,
+ ixgbevf_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
/* Refresh the desc even if pkt_addr didn't change
@@ -685,6 +751,10 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ /* XDP packets use error pointer so abort at this point */
+ if (IS_ERR(skb))
+ return true;
+
/* verify that the packet does not have any known errors */
if (unlikely(ixgbevf_test_staterr(rx_desc,
IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
@@ -734,11 +804,10 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
-static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
- struct page *page,
- const unsigned int truesize)
+static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
{
- unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
/* avoid re-using remote pages */
if (unlikely(ixgbevf_page_is_reserved(page)))
@@ -746,17 +815,13 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_ref_count(page) != pagecnt_bias))
+ if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
return false;
-
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
-
#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
+#define IXGBEVF_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
- if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
+ if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
return false;
#endif
@@ -765,7 +830,7 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
* the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds.
*/
- if (unlikely(pagecnt_bias == 1)) {
+ if (unlikely(!pagecnt_bias)) {
page_ref_add(page, USHRT_MAX);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
@@ -777,136 +842,268 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
+ * @size: size of buffer to be added
*
* This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
- *
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
**/
-static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
+static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *rx_buffer,
- u16 size,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int size)
{
- struct page *page = rx_buffer->page;
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
- unsigned int truesize = IXGBEVF_RX_BUFSZ;
+ unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
+ SKB_DATA_ALIGN(size);
#endif
- unsigned int pull_len;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+}
- if (unlikely(skb_is_nonlinear(skb)))
- goto add_tail_frag;
+static
+struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
+ struct ixgbevf_rx_buffer *rx_buffer,
+ struct xdp_buff *xdp,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ unsigned int size = xdp->data_end - xdp->data;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
+#endif
+ unsigned int headlen;
+ struct sk_buff *skb;
- if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+ /* prefetch first cache line of first page */
+ prefetch(xdp->data);
+#if L1_CACHE_BYTES < 128
+ prefetch(xdp->data + L1_CACHE_BYTES);
+#endif
+ /* Note, we get here by enabling legacy-rx via:
+ *
+ * ethtool --set-priv-flags <dev> legacy-rx on
+ *
+ * In this mode, we currently get 0 extra XDP headroom as
+ * opposed to having legacy-rx off, where we process XDP
+ * packets going to stack via ixgbevf_build_skb().
+ *
+ * For ixgbevf_construct_skb() mode it means that the
+ * xdp->data_meta will always point to xdp->data, since
+ * the helper cannot expand the head. Should this ever
+ * changed in future for legacy-rx mode on, then lets also
+ * add xdp->data_meta handling here.
+ */
- /* page is not reserved, we can reuse buffer as is */
- if (likely(!ixgbevf_page_is_reserved(page)))
- return true;
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
- /* this page cannot be reused so discard it */
- return false;
- }
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > IXGBEVF_RX_HDR_SIZE)
+ headlen = eth_get_headlen(xdp->data, IXGBEVF_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+ memcpy(__skb_put(skb, headlen), xdp->data,
+ ALIGN(headlen, sizeof(long)));
/* update all of the pointers */
- va += pull_len;
- size -= pull_len;
+ size -= headlen;
+ if (size) {
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ (xdp->data + headlen) -
+ page_address(rx_buffer->page),
+ size, truesize);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+ } else {
+ rx_buffer->pagecnt_bias++;
+ }
-add_tail_frag:
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- (unsigned long)va & ~PAGE_MASK, size, truesize);
+ return skb;
+}
+
+static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
+ u32 qmask)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
- return ixgbevf_can_reuse_rx_page(rx_buffer, page, truesize);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
-static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
+ struct ixgbevf_rx_buffer *rx_buffer,
+ struct xdp_buff *xdp,
+ union ixgbe_adv_rx_desc *rx_desc)
{
- struct ixgbevf_rx_buffer *rx_buffer;
- struct page *page;
- u16 size = le16_to_cpu(rx_desc->wb.upper.length);
+ unsigned int metasize = xdp->data - xdp->data_meta;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
+#endif
+ struct sk_buff *skb;
- rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
- page = rx_buffer->page;
- prefetchw(page);
+ /* Prefetch first cache line of first page. If xdp->data_meta
+ * is unused, this points to xdp->data, otherwise, we likely
+ * have a consumer accessing first few bytes of meta data,
+ * and then actual data.
+ */
+ prefetch(xdp->data_meta);
+#if L1_CACHE_BYTES < 128
+ prefetch(xdp->data_meta + L1_CACHE_BYTES);
+#endif
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
+ /* build an skb around the page buffer */
+ skb = build_skb(xdp->data_hard_start, truesize);
+ if (unlikely(!skb))
+ return NULL;
- if (likely(!skb)) {
- void *page_addr = page_address(page) +
- rx_buffer->page_offset;
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ __skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
- /* prefetch first cache line of first page */
- prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
+ /* update buffer offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
#endif
- /* allocate a skb to store the frags */
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- IXGBEVF_RX_HDR_SIZE);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
- return NULL;
- }
+ return skb;
+}
- /* we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
- }
+#define IXGBEVF_XDP_PASS 0
+#define IXGBEVF_XDP_CONSUMED 1
+#define IXGBEVF_XDP_TX 2
- /* pull page into skb */
- if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
- /* hand second half of page back to the ring */
- ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
- } else {
- /* We are not reusing the buffer so unmap it and free
- * any references we are holding to it
- */
- dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE,
- IXGBEVF_RX_DMA_ATTR);
- __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
- }
+static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
+ struct xdp_buff *xdp)
+{
+ struct ixgbevf_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
+ u32 len, cmd_type;
+ dma_addr_t dma;
+ u16 i;
- /* clear contents of buffer_info */
- rx_buffer->dma = 0;
- rx_buffer->page = NULL;
+ len = xdp->data_end - xdp->data;
- return skb;
+ if (unlikely(!ixgbevf_desc_unused(ring)))
+ return IXGBEVF_XDP_CONSUMED;
+
+ dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring->dev, dma))
+ return IXGBEVF_XDP_CONSUMED;
+
+ /* record the location of the first descriptor for this packet */
+ tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
+ tx_buffer->bytecount = len;
+ tx_buffer->gso_segs = 1;
+ tx_buffer->protocol = 0;
+
+ i = ring->next_to_use;
+ tx_desc = IXGBEVF_TX_DESC(ring, i);
+
+ dma_unmap_len_set(tx_buffer, len, len);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+ tx_buffer->data = xdp->data;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_DEXT |
+ IXGBE_ADVTXD_DCMD_IFCS;
+ cmd_type |= len | IXGBE_TXD_CMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.olinfo_status =
+ cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
+ IXGBE_ADVTXD_CC);
+
+ /* Avoid any potential race with cleanup */
+ smp_wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ i++;
+ if (i == ring->count)
+ i = 0;
+
+ tx_buffer->next_to_watch = tx_desc;
+ ring->next_to_use = i;
+
+ return IXGBEVF_XDP_TX;
}
-static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
- u32 qmask)
+static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring,
+ struct xdp_buff *xdp)
{
- struct ixgbe_hw *hw = &adapter->hw;
+ int result = IXGBEVF_XDP_PASS;
+ struct ixgbevf_ring *xdp_ring;
+ struct bpf_prog *xdp_prog;
+ u32 act;
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ if (!xdp_prog)
+ goto xdp_out;
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
+ result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fallthrough */
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping packet */
+ case XDP_DROP:
+ result = IXGBEVF_XDP_CONSUMED;
+ break;
+ }
+xdp_out:
+ rcu_read_unlock();
+ return ERR_PTR(-result);
+}
+
+static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
+ struct ixgbevf_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
+
+ rx_buffer->page_offset ^= truesize;
+#else
+ unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
+ SKB_DATA_ALIGN(size);
+
+ rx_buffer->page_offset += truesize;
+#endif
}
static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
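
[Note: any XDP program exercises the new verdict handling below — XDP_PASS frames continue into the skb paths, while XDP_TX frames are re-queued through ixgbevf_xmit_xdp_ring(). A minimal reflector for testing (editorial sketch; file and section names are illustrative), built with `clang -O2 -target bpf -c prog.c -o prog.o` and attached with `ip link set dev <vf> xdp obj prog.o sec xdp`:]

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_tx_all(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* drop runts that cannot even hold an Ethernet header */
	if (data + 14 > data_end)
		return XDP_DROP;

	return XDP_TX;	/* bounce the frame back out the same queue */
}

char _license[] SEC("license") = "GPL";
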
@@ -914,11 +1111,18 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
struct sk_buff *skb = rx_ring->skb;
+ bool xdp_xmit = false;
+ struct xdp_buff xdp;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < budget)) {
+ struct ixgbevf_rx_buffer *rx_buffer;
union ixgbe_adv_rx_desc *rx_desc;
+ unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
@@ -927,8 +1131,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
}
rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
-
- if (!rx_desc->wb.upper.length)
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
break;
/* This memory barrier is needed to keep us from reading
@@ -937,15 +1141,48 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
*/
rmb();
+ rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);
+
/* retrieve a buffer from the ring */
- skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
+ if (!skb) {
+ xdp.data = page_address(rx_buffer->page) +
+ rx_buffer->page_offset;
+ xdp.data_meta = xdp.data;
+ xdp.data_hard_start = xdp.data -
+ ixgbevf_rx_offset(rx_ring);
+ xdp.data_end = xdp.data + size;
+
+ skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
+ }
+
+ if (IS_ERR(skb)) {
+ if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
+ xdp_xmit = true;
+ ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
+ size);
+ } else {
+ rx_buffer->pagecnt_bias++;
+ }
+ total_rx_packets++;
+ total_rx_bytes += size;
+ } else if (skb) {
+ ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ } else if (ring_uses_build_skb(rx_ring)) {
+ skb = ixgbevf_build_skb(rx_ring, rx_buffer,
+ &xdp, rx_desc);
+ } else {
+ skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
+ &xdp, rx_desc);
+ }
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_buffer->pagecnt_bias++;
break;
}
+ ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb);
cleaned_count++;
/* fetch next buffer in frame if non-eop */
@@ -987,6 +1224,17 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
+ if (xdp_xmit) {
+ struct ixgbevf_ring *xdp_ring =
+ adapter->xdp_ring[rx_ring->queue_index];
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+ ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
+ }
+
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
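
[Note on one subtlety in the loop above: XDP verdicts travel inside the skb pointer itself, encoded as ERR_PTR(-IXGBEVF_XDP_TX) or ERR_PTR(-IXGBEVF_XDP_CONSUMED), so the long-standing !skb allocation-failure handling keeps working unmodified while IS_ERR()/PTR_ERR() recover the verdict. In miniature, with hypothetical names — an editorial sketch:]

#include <linux/err.h>
#include <linux/skbuff.h>

#define DEMO_XDP_CONSUMED	1
#define DEMO_XDP_TX		2

static int demo_verdict(struct sk_buff *skb)
{
	if (!IS_ERR(skb))
		return 0;		/* a real skb: continue up the stack */

	return -PTR_ERR(skb);		/* DEMO_XDP_CONSUMED or DEMO_XDP_TX */
}
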
@@ -1260,85 +1508,6 @@ static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
return IRQ_HANDLED;
}
-static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
- int r_idx)
-{
- struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
-
- a->rx_ring[r_idx]->next = q_vector->rx.ring;
- q_vector->rx.ring = a->rx_ring[r_idx];
- q_vector->rx.count++;
-}
-
-static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
- int t_idx)
-{
- struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
-
- a->tx_ring[t_idx]->next = q_vector->tx.ring;
- q_vector->tx.ring = a->tx_ring[t_idx];
- q_vector->tx.count++;
-}
-
-/**
- * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
- * @adapter: board private structure to initialize
- *
- * This function maps descriptor rings to the queue-specific vectors
- * we were allotted through the MSI-X enabling code. Ideally, we'd have
- * one vector per ring/queue, but on a constrained vector budget, we
- * group the rings as "efficiently" as possible. You would add new
- * mapping configurations in here.
- **/
-static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
-{
- int q_vectors;
- int v_start = 0;
- int rxr_idx = 0, txr_idx = 0;
- int rxr_remaining = adapter->num_rx_queues;
- int txr_remaining = adapter->num_tx_queues;
- int i, j;
- int rqpv, tqpv;
-
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
- /* The ideal configuration...
- * We have enough vectors to map one per queue.
- */
- if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
- for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
- map_vector_to_rxq(adapter, v_start, rxr_idx);
-
- for (; txr_idx < txr_remaining; v_start++, txr_idx++)
- map_vector_to_txq(adapter, v_start, txr_idx);
- return 0;
- }
-
- /* If we don't have enough vectors for a 1-to-1
- * mapping, we'll have to group them so there are
- * multiple queues per vector.
- */
- /* Re-adjusting *qpv takes care of the remainder. */
- for (i = v_start; i < q_vectors; i++) {
- rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
- for (j = 0; j < rqpv; j++) {
- map_vector_to_rxq(adapter, i, rxr_idx);
- rxr_idx++;
- rxr_remaining--;
- }
- }
- for (i = v_start; i < q_vectors; i++) {
- tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
- for (j = 0; j < tqpv; j++) {
- map_vector_to_txq(adapter, i, txr_idx);
- txr_idx++;
- txr_remaining--;
- }
- }
-
- return 0;
-}
-
/**
* ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
* @adapter: board private structure
@@ -1411,20 +1580,6 @@ free_queue_irqs:
return err;
}
-static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
-{
- int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
- for (i = 0; i < q_vectors; i++) {
- struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
-
- q_vector->rx.ring = NULL;
- q_vector->tx.ring = NULL;
- q_vector->rx.count = 0;
- q_vector->tx.count = 0;
- }
-}
-
/**
* ixgbevf_request_irq - initialize interrupts
* @adapter: board private structure
@@ -1464,8 +1619,6 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
free_irq(adapter->msix_entries[i].vector,
adapter->q_vector[i]);
}
-
- ixgbevf_reset_q_vectors(adapter);
}
/**
@@ -1583,11 +1736,14 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
/* Setup the HW Tx Head and Tail descriptor pointers */
for (i = 0; i < adapter->num_tx_queues; i++)
ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
-static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
+static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring, int index)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 srrctl;
@@ -1595,7 +1751,10 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
srrctl = IXGBE_SRRCTL_DROP_EN;
srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
- srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ if (ring_uses_large_buffer(ring))
+ srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
@@ -1767,10 +1926,21 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
ring->next_to_use = 0;
ring->next_to_alloc = 0;
- ixgbevf_configure_srrctl(adapter, reg_idx);
+ ixgbevf_configure_srrctl(adapter, ring, reg_idx);
+
+ /* RXDCTL.RLPML does not work on 82599 */
+ if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
+ rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
+ IXGBE_RXDCTL_RLPML_EN);
- /* allow any size packet since we can handle overflow */
- rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
+#if (PAGE_SIZE < 8192)
+ /* Limit the maximum frame size so we don't overrun the skb */
+ if (ring_uses_build_skb(ring) &&
+ !ring_uses_large_buffer(ring))
+ rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
+ IXGBE_RXDCTL_RLPML_EN;
+#endif
+ }
rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
@@ -1779,6 +1949,29 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}
+static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct net_device *netdev = adapter->netdev;
+ unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* set build_skb and buffer size flags */
+ clear_ring_build_skb_enabled(rx_ring);
+ clear_ring_uses_large_buffer(rx_ring);
+
+ if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
+ return;
+
+ set_ring_build_skb_enabled(rx_ring);
+
+ if (PAGE_SIZE < 8192) {
+ if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
+ return;
+
+ set_ring_uses_large_buffer(rx_ring);
+ }
+}
+
/**
* ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
* @adapter: board private structure
@@ -1806,8 +1999,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
- for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
+
+ ixgbevf_set_rx_buffer_len(adapter, rx_ring);
+ ixgbevf_configure_rx_ring(adapter, rx_ring);
+ }
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
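
[Worked example: on a 4 KiB-page system, ixgbevf_set_rx_buffer_len() above resolves roughly as follows — editorial note; the exact build_skb cutoff depends on SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) and NET_SKB_PAD, leaving roughly 1.6 KiB of the 2 KiB buffer for the frame on a typical x86-64 build:

	max_frame = MTU + ETH_HLEN + ETH_FCS_LEN = MTU + 18
	MTU 1500 -> 1518 <= IXGBEVF_MAX_FRAME_BUILD_SKB: build_skb on half-page buffers
	MTU 3000 -> 3018 >  IXGBEVF_MAX_FRAME_BUILD_SKB: build_skb plus 3 KiB buffers in order-1 pages
	legacy-rx on      -> both ring flags stay clear: the original 2 KiB frag path]
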
@@ -2136,13 +2333,13 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
- IXGBEVF_RX_BUFSZ,
+ ixgbevf_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
/* free resources associated with mapping */
dma_unmap_page_attrs(rx_ring->dev,
rx_buffer->dma,
- PAGE_SIZE,
+ ixgbevf_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
IXGBEVF_RX_DMA_ATTR);
@@ -2172,7 +2369,10 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
/* Free all the Tx ring sk_buffs */
- dev_kfree_skb_any(tx_buffer->skb);
+ if (ring_is_xdp(tx_ring))
+ page_frag_free(tx_buffer->data);
+ else
+ dev_kfree_skb_any(tx_buffer->skb);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -2240,6 +2440,8 @@ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ ixgbevf_clean_tx_ring(adapter->xdp_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
@@ -2278,6 +2480,13 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
IXGBE_TXDCTL_SWFLSH);
}
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
+ IXGBE_TXDCTL_SWFLSH);
+ }
+
if (!pci_channel_offline(adapter->pdev))
ixgbevf_reset(adapter);
@@ -2375,6 +2584,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
/* Start with base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
+ adapter->num_xdp_queues = 0;
spin_lock_bh(&adapter->mbx_lock);
@@ -2396,8 +2606,13 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
+ if (adapter->xdp_prog &&
+ hw->mac.max_tx_queues == rss)
+ rss = rss > 3 ? 2 : 1;
+
adapter->num_rx_queues = rss;
adapter->num_tx_queues = rss;
+ adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
default:
break;
}
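
[Worked example of the RSS trimming above — editorial note: XDP Tx rings are carved from the same hardware pool as regular Tx queues, so the reduction only triggers when max_tx_queues equals the RSS count and there is no spare capacity. With rss = 4 and a program loaded, rss becomes 2, so 2 Tx + 2 XDP = 4 rings exactly fill the limit; with rss = 2 it becomes 1, giving 1 + 1 = 2.]
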
@@ -2405,105 +2620,209 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
}
/**
- * ixgbevf_alloc_queues - Allocate memory for all rings
+ * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
* @adapter: board private structure to initialize
*
- * We allocate one ring per queue at run-time since we don't know the
- * number of queues at compile-time. The polling_netdev array is
- * intended for Multiqueue, but should work fine with a single queue.
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
**/
-static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
+static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
+ int vector, v_budget;
+
+ /* It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+ * than CPU's. So let's be conservative and only ask for
+ * (roughly) the same number of vectors as there are CPU's.
+ * The default is to use pairs of vectors.
+ */
+ v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
+ v_budget = min_t(int, v_budget, num_online_cpus());
+ v_budget += NON_Q_VECTORS;
+
+ adapter->msix_entries = kcalloc(v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+ /* A failure in MSI-X entry allocation isn't fatal, but the VF driver
+ * does not support any other modes, so we will simply fail here. Note
+ * that we clean up the msix_entries pointer else-where.
+ */
+ return ixgbevf_acquire_msix_vectors(adapter, v_budget);
+}
+
+static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
+ struct ixgbevf_ring_container *head)
+{
+ ring->next = head->ring;
+ head->ring = ring;
+ head->count++;
+}
+
+/**
+ * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: number of Tx rings for q vector
+ * @txr_idx: index of first Tx ring to assign
+ * @xdp_count: total number of XDP rings to allocate
+ * @xdp_idx: index of first XDP ring to allocate
+ * @rxr_count: number of Rx rings for q vector
+ * @rxr_idx: index of first Rx ring to assign
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ **/
+static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
+ int txr_count, int txr_idx,
+ int xdp_count, int xdp_idx,
+ int rxr_count, int rxr_idx)
+{
+ struct ixgbevf_q_vector *q_vector;
+ int reg_idx = txr_idx + xdp_idx;
struct ixgbevf_ring *ring;
- int rx = 0, tx = 0;
+ int ring_count, size;
+
+ ring_count = txr_count + xdp_count + rxr_count;
+ size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);
+
+ /* allocate q_vector and rings */
+ q_vector = kzalloc(size, GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* initialize NAPI */
+ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
- for (; tx < adapter->num_tx_queues; tx++) {
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- goto err_allocation;
+ /* tie q_vector and adapter together */
+ adapter->q_vector[v_idx] = q_vector;
+ q_vector->adapter = adapter;
+ q_vector->v_idx = v_idx;
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ while (txr_count) {
+ /* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ ixgbevf_add_ring(ring, &q_vector->tx);
+
+ /* apply Tx specific ring traits */
ring->count = adapter->tx_ring_count;
- ring->queue_index = tx;
- ring->reg_idx = tx;
+ ring->queue_index = txr_idx;
+ ring->reg_idx = reg_idx;
- adapter->tx_ring[tx] = ring;
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+ /* update count and index */
+ txr_count--;
+ txr_idx++;
+ reg_idx++;
+
+ /* push pointer to next ring */
+ ring++;
}
- for (; rx < adapter->num_rx_queues; rx++) {
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- goto err_allocation;
+ while (xdp_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ ixgbevf_add_ring(ring, &q_vector->tx);
+
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = xdp_idx;
+ ring->reg_idx = reg_idx;
+ set_ring_xdp(ring);
+
+ /* assign ring to adapter */
+ adapter->xdp_ring[xdp_idx] = ring;
+
+ /* update count and index */
+ xdp_count--;
+ xdp_idx++;
+ reg_idx++;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ while (rxr_count) {
+ /* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Rx values */
+ ixgbevf_add_ring(ring, &q_vector->rx);
+
+ /* apply Rx specific ring traits */
ring->count = adapter->rx_ring_count;
- ring->queue_index = rx;
- ring->reg_idx = rx;
+ ring->queue_index = rxr_idx;
+ ring->reg_idx = rxr_idx;
- adapter->rx_ring[rx] = ring;
- }
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
- return 0;
+ /* update count and index */
+ rxr_count--;
+ rxr_idx++;
-err_allocation:
- while (tx) {
- kfree(adapter->tx_ring[--tx]);
- adapter->tx_ring[tx] = NULL;
+ /* push pointer to next ring */
+ ring++;
}
- while (rx) {
- kfree(adapter->rx_ring[--rx]);
- adapter->rx_ring[rx] = NULL;
- }
- return -ENOMEM;
+ return 0;
}
/**
- * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
* @adapter: board private structure to initialize
+ * @v_idx: index of vector in adapter struct
*
- * Attempt to configure the interrupts using the best available
- * capabilities of the hardware and the kernel.
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
**/
-static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
+static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
{
- struct net_device *netdev = adapter->netdev;
- int err;
- int vector, v_budget;
-
- /* It's easy to be greedy for MSI-X vectors, but it really
- * doesn't do us much good if we have a lot more vectors
- * than CPU's. So let's be conservative and only ask for
- * (roughly) the same number of vectors as there are CPU's.
- * The default is to use pairs of vectors.
- */
- v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
- v_budget = min_t(int, v_budget, num_online_cpus());
- v_budget += NON_Q_VECTORS;
-
- /* A failure in MSI-X entry allocation isn't fatal, but it does
- * mean we disable MSI-X capabilities of the adapter.
- */
- adapter->msix_entries = kcalloc(v_budget,
- sizeof(struct msix_entry), GFP_KERNEL);
- if (!adapter->msix_entries)
- return -ENOMEM;
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
+ struct ixgbevf_ring *ring;
- for (vector = 0; vector < v_budget; vector++)
- adapter->msix_entries[vector].entry = vector;
+ ixgbevf_for_each_ring(ring, q_vector->tx) {
+ if (ring_is_xdp(ring))
+ adapter->xdp_ring[ring->queue_index] = NULL;
+ else
+ adapter->tx_ring[ring->queue_index] = NULL;
+ }
- err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
- if (err)
- return err;
+ ixgbevf_for_each_ring(ring, q_vector->rx)
+ adapter->rx_ring[ring->queue_index] = NULL;
- err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
- if (err)
- return err;
+ adapter->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
- return netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+ /* ixgbevf_get_stats() might access the rings on this vector,
+ * we must wait a grace period before freeing it.
+ */
+ kfree_rcu(q_vector, rcu);
}
/**
@@ -2515,35 +2834,58 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
**/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
- int q_idx, num_q_vectors;
- struct ixgbevf_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int xdp_remaining = adapter->num_xdp_queues;
+ int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
+ int err;
+
+ if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
+ for (; rxr_remaining; v_idx++, q_vectors--) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
- num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ err = ixgbevf_alloc_q_vector(adapter, v_idx,
+ 0, 0, 0, 0, rqpv, rxr_idx);
+ if (err)
+ goto err_out;
- for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
- q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
- if (!q_vector)
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ rxr_idx += rqpv;
+ }
+ }
+
+ for (; q_vectors; v_idx++, q_vectors--) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
+ int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors);
+
+ err = ixgbevf_alloc_q_vector(adapter, v_idx,
+ tqpv, txr_idx,
+ xqpv, xdp_idx,
+ rqpv, rxr_idx);
+
+ if (err)
goto err_out;
- q_vector->adapter = adapter;
- q_vector->v_idx = q_idx;
- netif_napi_add(adapter->netdev, &q_vector->napi,
- ixgbevf_poll, 64);
- adapter->q_vector[q_idx] = q_vector;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ rxr_idx += rqpv;
+ txr_remaining -= tqpv;
+ txr_idx += tqpv;
+ xdp_remaining -= xqpv;
+ xdp_idx += xqpv;
}
return 0;
err_out:
- while (q_idx) {
- q_idx--;
- q_vector = adapter->q_vector[q_idx];
-#ifdef CONFIG_NET_RX_BUSY_POLL
- napi_hash_del(&q_vector->napi);
-#endif
- netif_napi_del(&q_vector->napi);
- kfree(q_vector);
- adapter->q_vector[q_idx] = NULL;
+ while (v_idx) {
+ v_idx--;
+ ixgbevf_free_q_vector(adapter, v_idx);
}
+
return -ENOMEM;
}
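
The DIV_ROUND_UP() arithmetic hands out the remaining rings as evenly as possible, recomputing each vector's share from what is still unassigned. A worked example, assuming 4 Rx, 4 Tx and 4 XDP rings spread over 2 q_vectors (hypothetical counts):

	v_idx 0: rqpv = DIV_ROUND_UP(4, 2) = 2, tqpv = 2, xqpv = 2
	v_idx 1: rqpv = DIV_ROUND_UP(2, 1) = 2, tqpv = 2, xqpv = 2

With an odd split, say 3 Rx rings over 2 vectors, the first vector takes DIV_ROUND_UP(3, 2) = 2 and the second takes the remaining 1, so no vector ends up more than one ring ahead of any other. The leading `if (q_vectors >= ...)` branch covers the opposite case, where vectors outnumber rings: every Rx ring then gets a vector of its own before the Tx/XDP rings are distributed.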
@@ -2557,17 +2899,11 @@ err_out:
**/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
- int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
- for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
- struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- adapter->q_vector[q_idx] = NULL;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- napi_hash_del(&q_vector->napi);
-#endif
- netif_napi_del(&q_vector->napi);
- kfree(q_vector);
+ while (q_vectors) {
+ q_vectors--;
+ ixgbevf_free_q_vector(adapter, q_vectors);
}
}
@@ -2611,21 +2947,14 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
goto err_alloc_q_vectors;
}
- err = ixgbevf_alloc_queues(adapter);
- if (err) {
- pr_err("Unable to allocate memory for queues\n");
- goto err_alloc_queues;
- }
-
- hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
- (adapter->num_rx_queues > 1) ? "Enabled" :
- "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u, XDP Queue count = %u\n",
+ (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
+ adapter->num_rx_queues, adapter->num_tx_queues,
+ adapter->num_xdp_queues);
set_bit(__IXGBEVF_DOWN, &adapter->state);
return 0;
-err_alloc_queues:
- ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
@@ -2641,18 +2970,8 @@ err_set_interrupt:
**/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- kfree(adapter->tx_ring[i]);
- adapter->tx_ring[i] = NULL;
- }
- for (i = 0; i < adapter->num_rx_queues; i++) {
- kfree(adapter->rx_ring[i]);
- adapter->rx_ring[i] = NULL;
- }
-
adapter->num_tx_queues = 0;
+ adapter->num_xdp_queues = 0;
adapter->num_rx_queues = 0;
ixgbevf_free_q_vectors(adapter);
@@ -2860,6 +3179,8 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
if (netif_carrier_ok(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
set_check_for_tx_hang(adapter->tx_ring[i]);
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ set_check_for_tx_hang(adapter->xdp_ring[i]);
}
/* get one bit for every active Tx/Rx interrupt vector */
@@ -3031,6 +3352,9 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
if (adapter->tx_ring[i]->desc)
ixgbevf_free_tx_resources(adapter->tx_ring[i]);
+ for (i = 0; i < adapter->num_xdp_queues; i++)
+ if (adapter->xdp_ring[i]->desc)
+ ixgbevf_free_tx_resources(adapter->xdp_ring[i]);
}
/**
@@ -3081,26 +3405,44 @@ err:
**/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
- int i, err = 0;
+ int i, j = 0, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
if (!err)
continue;
hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
+ goto err_setup_tx;
+ }
+
+ for (j = 0; j < adapter->num_xdp_queues; j++) {
+ err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]);
+ if (!err)
+ continue;
+ hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
break;
}
+ return 0;
+err_setup_tx:
+ /* rewind the index freeing the rings as we go */
+ while (j--)
+ ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
+ while (i--)
+ ixgbevf_free_tx_resources(adapter->tx_ring[i]);
+
return err;
}
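
The `while (j--)`/`while (i--)` rewind is the usual unwind idiom for partially completed array setup: at the failure point i (or j) indexes the ring whose allocation failed, so the post-decrement frees exactly the rings that did succeed, in reverse order, without touching the failed slot. The same pattern appears in ixgbevf_setup_all_rx_resources() in the next hunk. A generic sketch of the idiom, with hypothetical alloc_one()/free_one() helpers:

	int i;

	for (i = 0; i < n; i++)
		if (alloc_one(&res[i]))
			goto err;	/* res[0..i-1] hold resources */
	return 0;
err:
	while (i--)		/* post-decrement skips the failed slot i */
		free_one(&res[i]);
	return -ENOMEM;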
/**
* ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
**/
-int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
{
int size;
@@ -3121,6 +3463,13 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
if (!rx_ring->desc)
goto err;
+ /* XDP RX-queue info */
+ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
+ rx_ring->queue_index) < 0)
+ goto err;
+
+ rx_ring->xdp_prog = adapter->xdp_prog;
+
return 0;
err:
vfree(rx_ring->rx_buffer_info);
@@ -3144,12 +3493,18 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
+ err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
if (!err)
continue;
hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
- break;
+ goto err_setup_rx;
}
+
+ return 0;
+err_setup_rx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ ixgbevf_free_rx_resources(adapter->rx_ring[i]);
return err;
}
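
xdp_rxq_info_reg() publishes per-ring Rx queue metadata that the XDP core expects every frame to carry; it is paired with the xdp_rxq_info_unreg() call added to ixgbevf_free_rx_resources() in the next hunk. Roughly, the Rx cleanup path of this series points each xdp_buff at this structure before running the program (a sketch, not this hunk; rx_buffer, size and ixgbevf_rx_offset() stand in for the real Rx-path locals):

	struct xdp_buff xdp;

	xdp.rxq = &rx_ring->xdp_rxq;	/* registered at setup time above */
	xdp.data = page_address(rx_buffer->page) + rx_buffer->page_offset;
	xdp.data_hard_start = xdp.data - ixgbevf_rx_offset(rx_ring);
	xdp.data_end = xdp.data + size;
	act = bpf_prog_run_xdp(rx_ring->xdp_prog, &xdp);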
@@ -3163,6 +3518,8 @@ void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
ixgbevf_clean_rx_ring(rx_ring);
+ rx_ring->xdp_prog = NULL;
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
@@ -3244,28 +3601,31 @@ int ixgbevf_open(struct net_device *netdev)
ixgbevf_configure(adapter);
- /* Map the Tx/Rx rings to the vectors we were allotted.
- * if request_irq will be called in this function map_rings
- * must be called *before* up_complete
- */
- ixgbevf_map_rings_to_vectors(adapter);
-
err = ixgbevf_request_irq(adapter);
if (err)
goto err_req_irq;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+ if (err)
+ goto err_set_queues;
+
ixgbevf_up_complete(adapter);
return 0;
+err_set_queues:
+ ixgbevf_free_irq(adapter);
err_req_irq:
- ixgbevf_down(adapter);
-err_setup_rx:
ixgbevf_free_all_rx_resources(adapter);
-err_setup_tx:
+err_setup_rx:
ixgbevf_free_all_tx_resources(adapter);
+err_setup_tx:
ixgbevf_reset(adapter);
-
err_setup_reset:
return err;
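
The reordered error labels read one step "off" by design: since the setup_all_{tx,rx}_resources() rewind loops added earlier in this patch now clean up their own partial allocations, a jump to err_setup_rx only has to free the Tx resources that completed before it, while err_req_irq and err_set_queues additionally release the Rx resources (and, for the latter, the IRQs). Each label undoes exactly the steps that finished before the failing one.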
@@ -3707,11 +4067,10 @@ static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
-static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
+ struct ixgbevf_ring *tx_ring)
{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_tx_buffer *first;
- struct ixgbevf_ring *tx_ring;
int tso;
u32 tx_flags = 0;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
@@ -3726,8 +4085,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- tx_ring = adapter->tx_ring[skb->queue_mapping];
-
/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
* + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head,
@@ -3780,6 +4137,29 @@ out_drop:
return NETDEV_TX_OK;
}
+static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring;
+
+ if (skb->len <= 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* The minimum packet size for olinfo paylen is 17 so pad the skb
+ * in order to meet this minimum size requirement.
+ */
+ if (skb->len < 17) {
+ if (skb_padto(skb, 17))
+ return NETDEV_TX_OK;
+ skb->len = 17;
+ }
+
+ tx_ring = adapter->tx_ring[skb->queue_mapping];
+ return ixgbevf_xmit_frame_ring(skb, tx_ring);
+}
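
skb_padto() extends and zeroes the buffer but deliberately leaves skb->len untouched, which is why the explicit assignment after it is needed. A possible equivalent using the helper that also updates the length (not what this patch uses):

	if (skb_put_padto(skb, 17))
		return NETDEV_TX_OK;	/* skb already freed on failure */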
+
/**
* ixgbevf_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
@@ -3826,6 +4206,12 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
int ret;
+ /* prevent MTU being changed to a size unsupported by XDP */
+ if (adapter->xdp_prog) {
+ dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n");
+ return -EPERM;
+ }
+
spin_lock_bh(&adapter->mbx_lock);
/* notify the PF of our intent to use this size of frame */
ret = hw->mac.ops.set_rlpml(hw, max_frame);
@@ -3839,6 +4225,9 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
+ if (netif_running(netdev))
+ ixgbevf_reinit_locked(adapter);
+
return 0;
}
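
The -EPERM guard works together with the frame-size check in ixgbevf_xdp_setup() further down: once a program is loaded the MTU is frozen, and at load time the MTU must already fit the Rx buffer. Rough numbers, assuming the common 1500-byte MTU and a typical 2048-byte Rx buffer:

	frame_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4)
	           = 1522 bytes, which fits, so the program loads;

a jumbo MTU such as 9000 would overflow the buffer, so ixgbevf_xdp_setup() returns -EINVAL instead, and this check keeps the MTU from growing afterwards.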
@@ -3917,17 +4306,11 @@ static int ixgbevf_resume(struct pci_dev *pdev)
rtnl_lock();
err = ixgbevf_init_interrupt_scheme(adapter);
+ if (!err && netif_running(netdev))
+ err = ixgbevf_open(netdev);
rtnl_unlock();
- if (err) {
- dev_err(&pdev->dev, "Cannot initialize interrupts\n");
+ if (err)
return err;
- }
-
- if (netif_running(netdev)) {
- err = ixgbevf_open(netdev);
- if (err)
- return err;
- }
netif_device_attach(netdev);
@@ -3940,6 +4323,23 @@ static void ixgbevf_shutdown(struct pci_dev *pdev)
ixgbevf_suspend(pdev, PMSG_SUSPEND);
}
+static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
+ const struct ixgbevf_ring *ring)
+{
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ bytes = ring->stats.bytes;
+ packets = ring->stats.packets;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ stats->tx_bytes += bytes;
+ stats->tx_packets += packets;
+ }
+}
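
The u64_stats begin/retry pair reads bytes and packets as one consistent snapshot without locking the Tx hot path. The writer side, as used elsewhere in this driver's completion paths, looks roughly like this (total_bytes/total_packets being locals of the completion loop):

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);

On 64-bit kernels the seqcount compiles away; on 32-bit it protects the two 64-bit counters from torn reads.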
+
static void ixgbevf_get_stats(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -3953,6 +4353,7 @@ static void ixgbevf_get_stats(struct net_device *netdev,
stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
+ rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
ring = adapter->rx_ring[i];
do {
@@ -3966,14 +4367,14 @@ static void ixgbevf_get_stats(struct net_device *netdev,
for (i = 0; i < adapter->num_tx_queues; i++) {
ring = adapter->tx_ring[i];
- do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
- bytes = ring->stats.bytes;
- packets = ring->stats.packets;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
- stats->tx_bytes += bytes;
- stats->tx_packets += packets;
+ ixgbevf_get_tx_ring_stats(stats, ring);
+ }
+
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ ring = adapter->xdp_ring[i];
+ ixgbevf_get_tx_ring_stats(stats, ring);
}
+ rcu_read_unlock();
}
#define IXGBEVF_MAX_MAC_HDR_LEN 127
@@ -4010,6 +4411,64 @@ ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
+static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+{
+ int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ struct ixgbevf_adapter *adapter = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+
+ /* verify ixgbevf ring attributes are sufficient for XDP */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbevf_ring *ring = adapter->rx_ring[i];
+
+ if (frame_size > ixgbevf_rx_bufsz(ring))
+ return -EINVAL;
+ }
+
+ old_prog = xchg(&adapter->xdp_prog, prog);
+
+ /* If transitioning XDP modes reconfigure rings */
+ if (!!prog != !!old_prog) {
+ /* Hardware has to reinitialize queues and interrupts to
+ * match packet buffer alignment. Unfortunately, the
+ * hardware is not flexible enough to do this dynamically.
+ */
+ if (netif_running(dev))
+ ixgbevf_close(dev);
+
+ ixgbevf_clear_interrupt_scheme(adapter);
+ ixgbevf_init_interrupt_scheme(adapter);
+
+ if (netif_running(dev))
+ ixgbevf_open(dev);
+ } else {
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
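
xchg() makes the program swap atomic with respect to the per-ring readers, and bpf_prog_put() drops the reference held on the old program; the reference on the new program is handed to the driver by the XDP core (dev_change_xdp_fd() takes it before calling ndo_bpf), so no bpf_prog_get() is needed here. When queues are not being rebuilt, the per-ring xchg() loop is what lets an in-flight NAPI poll keep using whichever program pointer it sampled until it finishes.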
+
+static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(dev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return ixgbevf_xdp_setup(dev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = !!(adapter->xdp_prog);
+ xdp->prog_id = adapter->xdp_prog ?
+ adapter->xdp_prog->aux->id : 0;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
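
For reference, a minimal program of the kind this hook accepts; a hypothetical example, not part of the driver, built with clang -O2 -target bpf and attached with iproute2:

	#include <linux/bpf.h>

	__attribute__((section("xdp"), used))
	int xdp_pass_all(struct xdp_md *ctx)
	{
		return XDP_PASS;	/* hand every frame on to the stack */
	}

	char _license[] __attribute__((section("license"), used)) = "GPL";

	/* attach / detach:
	 *   ip link set dev <vf> xdp obj xdp_pass.o sec xdp
	 *   ip link set dev <vf> xdp off
	 */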
+
static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_open = ixgbevf_open,
.ndo_stop = ixgbevf_close,
@@ -4026,6 +4485,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_poll_controller = ixgbevf_netpoll,
#endif
.ndo_features_check = ixgbevf_features_check,
+ .ndo_bpf = ixgbevf_xdp,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index bc0442acae78..5ec947fe3d09 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************
Intel 82599 Virtual Function driver
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index 2764fd16261f..278f73980501 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************
Intel 82599 Virtual Function driver
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index c651fefcc3d2..194fbdaa4519 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************
Intel 82599 Virtual Function driver