diff options
Diffstat (limited to 'include/linux/netdevice.h')
| -rw-r--r-- | include/linux/netdevice.h | 47 | 
1 file changed, 39 insertions, 8 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f3a3b761abfb..d1a687444b27 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1906,6 +1906,7 @@ enum netdev_reg_state {   *			 device struct   *	@mpls_ptr:	mpls_dev struct pointer   *	@mctp_ptr:	MCTP specific data + *	@psp_dev:	PSP crypto device registered for this netdev   *   *	@dev_addr:	Hw address (before bcast,   *			because most packets are unicast) @@ -2310,6 +2311,9 @@ struct net_device {  #if IS_ENABLED(CONFIG_MCTP)  	struct mctp_dev __rcu	*mctp_ptr;  #endif +#if IS_ENABLED(CONFIG_INET_PSP) +	struct psp_dev __rcu	*psp_dev; +#endif  /*   * Cache lines mostly used on receive path (including eth_type_trans()) @@ -3459,6 +3463,32 @@ static inline bool dev_has_header(const struct net_device *dev)  	return dev->header_ops && dev->header_ops->create;  } +struct numa_drop_counters { +	atomic_t	drops0 ____cacheline_aligned_in_smp; +	atomic_t	drops1 ____cacheline_aligned_in_smp; +}; + +static inline int numa_drop_read(const struct numa_drop_counters *ndc) +{ +	return atomic_read(&ndc->drops0) + atomic_read(&ndc->drops1); +} + +static inline void numa_drop_add(struct numa_drop_counters *ndc, int val) +{ +	int n = numa_node_id() % 2; + +	if (n) +		atomic_add(val, &ndc->drops1); +	else +		atomic_add(val, &ndc->drops0); +} + +static inline void numa_drop_reset(struct numa_drop_counters *ndc) +{ +	atomic_set(&ndc->drops0, 0); +	atomic_set(&ndc->drops1, 0); +} +  /*   * Incoming packets are placed on per-CPU queues   */ @@ -3504,13 +3534,9 @@ struct softnet_data {  	struct sk_buff_head	input_pkt_queue;  	struct napi_struct	backlog; -	atomic_t		dropped ____cacheline_aligned_in_smp; +	struct numa_drop_counters drop_counters; -	/* Another possibly contended cache line */ -	spinlock_t		defer_lock ____cacheline_aligned_in_smp; -	int			defer_count; -	int			defer_ipi_scheduled; -	struct sk_buff		*defer_list; +	int			defer_ipi_scheduled ____cacheline_aligned_in_smp;  	
call_single_data_t	defer_csd;  }; @@ -5290,13 +5316,18 @@ void skb_warn_bad_offload(const struct sk_buff *skb);  static inline bool net_gso_ok(netdev_features_t features, int gso_type)  { -	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; +	netdev_features_t feature; + +	if (gso_type & (SKB_GSO_TCP_FIXEDID | SKB_GSO_TCP_FIXEDID_INNER)) +		gso_type |= __SKB_GSO_TCP_FIXEDID; + +	feature = ((netdev_features_t)gso_type << NETIF_F_GSO_SHIFT) & NETIF_F_GSO_MASK;  	/* check flags correspondence */  	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));  	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));  	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); -	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); +	BUILD_BUG_ON(__SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));  	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));  	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));  	BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));  | 
