Diffstat:
 include/linux/netdevice.h | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 56 insertions(+), 10 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d1a687444b27..67e25f6d15a4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -377,6 +377,8 @@ struct napi_config {
* Structure for NAPI scheduling similar to tasklet but with weighting
*/
struct napi_struct {
+ /* This field should be first or softnet_data.backlog needs tweaks. */
+ unsigned long state;
/* The poll_list must only be managed by the entity which
* changes the state of the NAPI_STATE_SCHED bit. This means
* whoever atomically sets that bit can add this napi_struct
@@ -385,7 +387,6 @@ struct napi_struct {
*/
struct list_head poll_list;
- unsigned long state;
int weight;
u32 defer_hard_irqs_count;
int (*poll)(struct napi_struct *, int);
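
Moving state to the front of napi_struct is what allows the softnet_data change further down to place backlog.state on the same cache line as input_queue_tail and input_pkt_queue. A minimal sketch of the enqueue-side pattern that layout serves, loosely modeled on enqueue_to_backlog(); the function name and the softirq call are illustrative, not part of this patch:

/* Sketch: with backlog.state adjacent to input_pkt_queue, queueing a
 * packet and scheduling the backlog NAPI touch a single cache line.
 * Assumes irqs are already disabled, as in the real enqueue path.
 */
static void illustrative_enqueue(struct softnet_data *sd, struct sk_buff *skb)
{
	__skb_queue_tail(&sd->input_pkt_queue, skb);
	if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);	/* illustrative */
}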
@@ -422,11 +423,12 @@ enum {
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
NAPI_STATE_LISTED, /* NAPI added to system lists */
NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */
- NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */
+ NAPI_STATE_IN_BUSY_POLL, /* Do not rearm NAPI interrupt */
NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/
NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/
NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */
NAPI_STATE_HAS_NOTIFIER, /* Napi has an IRQ notifier */
+ NAPI_STATE_THREADED_BUSY_POLL, /* The threaded NAPI poller will busy poll */
};
enum {
@@ -441,6 +443,7 @@ enum {
NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED),
NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED),
NAPIF_STATE_HAS_NOTIFIER = BIT(NAPI_STATE_HAS_NOTIFIER),
+ NAPIF_STATE_THREADED_BUSY_POLL = BIT(NAPI_STATE_THREADED_BUSY_POLL),
};
enum gro_result {
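
The NAPIF_STATE_* masks mirror the bit numbers above so callers can test several bits from one read of napi->state. A hedged sketch of how a threaded poller might consult the new THREADED_BUSY_POLL bit; the helper is illustrative, not part of this patch:

static bool illustrative_wants_busy_poll(const struct napi_struct *napi)
{
	unsigned long val = READ_ONCE(napi->state);

	/* Busy poll from the kthread only while both mode bits are set. */
	return (val & NAPIF_STATE_THREADED) &&
	       (val & NAPIF_STATE_THREADED_BUSY_POLL);
}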
@@ -874,6 +877,7 @@ enum net_device_path_type {
DEV_PATH_PPPOE,
DEV_PATH_DSA,
DEV_PATH_MTK_WDMA,
+ DEV_PATH_TUN,
};
struct net_device_path {
@@ -886,6 +890,18 @@ struct net_device_path {
u8 h_dest[ETH_ALEN];
} encap;
struct {
+ union {
+ struct in_addr src_v4;
+ struct in6_addr src_v6;
+ };
+ union {
+ struct in_addr dst_v4;
+ struct in6_addr dst_v6;
+ };
+
+ u8 l3_proto;
+ } tun;
+ struct {
enum {
DEV_PATH_BR_VLAN_KEEP,
DEV_PATH_BR_VLAN_TAG,
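
The anonymous unions above let a tunnel record either address family in place. A hedged sketch of a .ndo_fill_forward_path() fragment populating the new entry; the function name, field values, and the l3_proto encoding are assumptions, not defined by this hunk:

/* Illustrative: describe an IPv4 tunnel hop in the forward path. */
static void illustrative_fill_tun_path(struct net_device_path *path,
				       __be32 saddr, __be32 daddr)
{
	path->type = DEV_PATH_TUN;
	path->tun.src_v4.s_addr = saddr;
	path->tun.dst_v4.s_addr = daddr;
	path->tun.l3_proto = 4;		/* assumed: 4 = IPv4, 6 = IPv6 */
}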
@@ -1815,6 +1831,8 @@ enum netdev_reg_state {
*
* @mpls_features: Mask of features inheritable by MPLS
* @gso_partial_features: value(s) from NETIF_F_GSO\*
+ * @mangleid_features: Mask of features requiring MANGLEID; they are
+ * disabled together with it.
*
* @ifindex: interface index
* @group: The group the device belongs to
@@ -2203,6 +2221,7 @@ struct net_device {
netdev_features_t vlan_features;
netdev_features_t hw_enc_features;
netdev_features_t mpls_features;
+ netdev_features_t mangleid_features;
unsigned int min_mtu;
unsigned int max_mtu;
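
A hedged sketch of the kind of feature fixup the new mask enables: if MANGLEID is cleared, everything a driver placed in mangleid_features goes with it. The function and policy are illustrative; the patch itself only adds the field:

static netdev_features_t illustrative_fix_features(struct net_device *dev,
						   netdev_features_t features)
{
	/* Assumed policy: dropping MANGLEID drops its dependents too. */
	if (!(features & NETIF_F_TSO_MANGLEID))
		features &= ~dev->mangleid_features;
	return features;
}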
@@ -3401,6 +3420,7 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *netdev_get_by_index(struct net *net, int ifindex,
netdevice_tracker *tracker, gfp_t gfp);
+struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex);
struct net_device *netdev_get_by_name(struct net *net, const char *name,
netdevice_tracker *tracker, gfp_t gfp);
struct net_device *netdev_get_by_flags_rcu(struct net *net, netdevice_tracker *tracker,
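
By analogy with the tracker-based getters above it, netdev_get_by_index_lock() presumably returns the device with its instance lock held. A hedged usage sketch, assuming netdev_unlock() is the matching release:

static int illustrative_lookup(struct net *net, int ifindex)
{
	struct net_device *dev;

	dev = netdev_get_by_index_lock(net, ifindex);
	if (!dev)
		return -ENODEV;
	/* ... operate on dev while holding its instance lock ... */
	netdev_unlock(dev);
	return 0;
}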
@@ -3529,9 +3549,17 @@ struct softnet_data {
call_single_data_t csd ____cacheline_aligned_in_smp;
struct softnet_data *rps_ipi_next;
unsigned int cpu;
+
+ /* We force a cacheline alignment from here, to hold together
+ * input_queue_tail, input_pkt_queue and backlog.state.
+ * We add holes so that backlog.state is the last field
+ * of this cache line.
+ */
+ long pad[3] ____cacheline_aligned_in_smp;
unsigned int input_queue_tail;
#endif
struct sk_buff_head input_pkt_queue;
+
struct napi_struct backlog;
struct numa_drop_counters drop_counters;
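
The pad[3] hole only does its job if backlog.state actually lands at the end of the cache line holding input_queue_tail and input_pkt_queue, which is why state moved to the front of napi_struct above. An illustrative compile-time check of that layout, not part of the patch:

/* Illustrative: the hot fields must share one cache line. */
static_assert(offsetof(struct softnet_data, input_queue_tail) / SMP_CACHE_BYTES ==
	      offsetof(struct softnet_data, backlog.state) / SMP_CACHE_BYTES);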
@@ -4683,7 +4711,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ /* Pairs with READ_ONCE() in netif_tx_owned() */
WRITE_ONCE(txq->xmit_lock_owner, cpu);
}
@@ -4701,7 +4729,7 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ /* Pairs with READ_ONCE() in netif_tx_owned() */
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}
@@ -4710,7 +4738,7 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
bool ok = spin_trylock(&txq->_xmit_lock);
if (likely(ok)) {
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ /* Pairs with READ_ONCE() in netif_tx_owned() */
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}
return ok;
@@ -4718,14 +4746,14 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ /* Pairs with READ_ONCE() in netif_tx_owned() */
WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock(&txq->_xmit_lock);
}
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ /* Pairs with READ_ONCE() in netif_tx_owned() */
WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock_bh(&txq->_xmit_lock);
}
@@ -4818,6 +4846,23 @@ static inline void netif_tx_disable(struct net_device *dev)
local_bh_enable();
}
+#ifndef CONFIG_PREEMPT_RT
+static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
+{
+ /* Other cpus might concurrently change txq->xmit_lock_owner
+ * to -1 or to their cpu id, but not to our id.
+ */
+ return READ_ONCE(txq->xmit_lock_owner) == cpu;
+}
+
+#else
+static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
+{
+ return rt_mutex_owner(&txq->_xmit_lock.lock) == current;
+}
+
+#endif
+
static inline void netif_addr_lock(struct net_device *dev)
{
unsigned char nest_level = 0;
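
All five xmit_lock_owner comments above now pair with the READ_ONCE() in netif_tx_owned(). A hedged sketch of the call-site pattern they serve, condensed from the usual recursion check in the transmit path; the function name and drop branch are illustrative:

static netdev_tx_t illustrative_xmit_one(struct net_device *dev,
					 struct netdev_queue *txq,
					 struct sk_buff *skb)
{
	unsigned int cpu = smp_processor_id();

	/* Avoid deadlocking on a queue this CPU (or task) already holds. */
	if (netif_tx_owned(txq, cpu)) {
		kfree_skb(skb);	/* recursion: drop rather than deadlock */
		return NETDEV_TX_OK;
	}
	HARD_TX_LOCK(dev, txq, cpu);
	/* ... hand skb to the driver here ... */
	HARD_TX_UNLOCK(dev, txq);
	return NETDEV_TX_OK;
}

Note that the PREEMPT_RT variant ignores the cpu argument: with sleeping spinlocks the owner can migrate between CPUs, so ownership is tracked per task via rt_mutex_owner().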
@@ -5138,8 +5183,7 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
void netdev_lower_state_changed(struct net_device *lower_dev,
void *lower_state_info);
-/* RSS keys are 40 or 52 bytes long */
-#define NETDEV_RSS_KEY_LEN 52
+#define NETDEV_RSS_KEY_LEN 256
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);
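
Raising NETDEV_RSS_KEY_LEN from 52 to 256 bytes lets netdev_rss_key_fill() seed hardware wanting larger hash keys; callers still pass their own length. A hedged driver-side usage sketch:

u8 rss_key[NETDEV_RSS_KEY_LEN];

/* Fill with the lazily initialized system-wide random RSS key. */
netdev_rss_key_fill(rss_key, sizeof(rss_key));
/* ... program rss_key into the NIC's hash-key registers ... */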
@@ -5298,12 +5342,14 @@ netdev_features_t netdev_increment_features(netdev_features_t all,
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
netdev_features_t mask)
{
- return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
+ return netdev_increment_features(features, NETIF_F_ALL_TSO |
+ NETIF_F_ALL_FOR_ALL, mask);
}
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);
+void netdev_compute_master_upper_features(struct net_device *dev, bool update_header);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
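
Two loose ends from this final hunk: netdev_add_tso_features() now also propagates the NETIF_F_ALL_FOR_ALL bits, and masters get a dedicated recompute hook. A hedged sketch of how a bonding-style master might use the new helper; the event context and the meaning of the bool are assumptions:

static void illustrative_slave_changed(struct net_device *master_dev)
{
	/* Recompute the master's features after a lower device joins or
	 * leaves; true presumably also refreshes header-related state
	 * (assumed meaning of update_header).
	 */
	netdev_compute_master_upper_features(master_dev, true);
}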