Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--	include/linux/skbuff.h	70
1 file changed, 64 insertions, 6 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 7cdfb4d52847..f5aa87e1e0c8 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -380,7 +380,10 @@ struct sk_buff {
 	kmemcheck_bitfield_begin(flags2);
 	__u16			queue_mapping:16;
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
-	__u8			ndisc_nodetype:2;
+	__u8			ndisc_nodetype:2,
+				deliver_no_wcard:1;
+#else
+	__u8			deliver_no_wcard:1;
 #endif
 	kmemcheck_bitfield_end(flags2);
 
@@ -501,7 +504,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 	return __alloc_skb(size, priority, 1, -1);
 }
 
-extern int skb_recycle_check(struct sk_buff *skb, int skb_size);
+extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
 
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
@@ -1411,12 +1414,14 @@ static inline int skb_network_offset(const struct sk_buff *skb)
  *
  * Various parts of the networking layer expect at least 32 bytes of
  * headroom, you should not reduce this.
- * With RPS, we raised NET_SKB_PAD to 64 so that get_rps_cpus() fetches span
- * a 64 bytes aligned block to fit modern (>= 64 bytes) cache line sizes
+ *
+ * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
+ * to reduce the average number of cache lines per packet.
+ * get_rps_cpus(), for example, only accesses one 64-byte aligned block:
  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
  */
 #ifndef NET_SKB_PAD
-#define NET_SKB_PAD	64
+#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
 #endif
 
 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
@@ -1928,6 +1933,36 @@ static inline ktime_t net_invalid_timestamp(void)
 	return ktime_set(0, 0);
 }
 
+extern void skb_timestamping_init(void);
+
+#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
+
+extern void skb_clone_tx_timestamp(struct sk_buff *skb);
+extern bool skb_defer_rx_timestamp(struct sk_buff *skb);
+
+#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
+
+static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
+{
+}
+
+static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
+{
+	return false;
+}
+
+#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
+
+/**
+ * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
+ *
+ * @skb: clone of the original outgoing packet
+ * @hwtstamps: hardware time stamps
+ *
+ */
+void skb_complete_tx_timestamp(struct sk_buff *skb,
+			       struct skb_shared_hwtstamps *hwtstamps);
+
 /**
  * skb_tstamp_tx - queue clone of skb with send time stamps
  * @orig_skb:	the original outgoing packet
@@ -1942,6 +1977,28 @@ static inline ktime_t net_invalid_timestamp(void)
 extern void skb_tstamp_tx(struct sk_buff *orig_skb,
 			  struct skb_shared_hwtstamps *hwtstamps);
 
+static inline void sw_tx_timestamp(struct sk_buff *skb)
+{
+	union skb_shared_tx *shtx = skb_tx(skb);
+	if (shtx->software && !shtx->in_progress)
+		skb_tstamp_tx(skb, NULL);
+}
+
+/**
+ * skb_tx_timestamp() - Driver hook for transmit timestamping
+ *
+ * Ethernet MAC drivers should call this function in their hard_xmit()
+ * function as soon as possible after giving the sk_buff to the MAC
+ * hardware, but before freeing the sk_buff.
+ *
+ * @skb: A socket buffer.
+ */
+static inline void skb_tx_timestamp(struct sk_buff *skb)
+{
+	skb_clone_tx_timestamp(skb);
+	sw_tx_timestamp(skb);
+}
+
 extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
 extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
 
@@ -2129,7 +2186,8 @@ static inline bool skb_warn_if_lro(const struct sk_buff *skb)
 	/* LRO sets gso_size but not gso_type, whereas if GSO is really
 	 * wanted then gso_type will be set. */
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
-	if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
+	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
+	    unlikely(shinfo->gso_type == 0)) {
 		__skb_warn_lro_forwarding(skb);
 		return true;
 	}
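For reference, the worst case listed in the new NET_SKB_PAD comment sums to exactly one modern cache line: NET_IP_ALIGN (2) + Ethernet header (14) + IPv6 header (40) + ports (8) = 64 bytes, so the block get_rps_cpus() reads fits in a single 64-byte fetch. Below is a minimal sketch of a receive-buffer allocation that honors the padding; foo_alloc_rx_skb() is a hypothetical helper, and drivers that use netdev_alloc_skb() already get the NET_SKB_PAD reservation from the core.

#include <linux/skbuff.h>

/* Hypothetical helper honoring NET_SKB_PAD; alloc_skb() and
 * skb_reserve() are the real APIs, foo_alloc_rx_skb() is not.
 */
static struct sk_buff *foo_alloc_rx_skb(unsigned int buf_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(NET_SKB_PAD + buf_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Leave NET_SKB_PAD bytes of headroom so header accesses
	 * stay within aligned, already-fetched cache lines.
	 */
	skb_reserve(skb, NET_SKB_PAD);
	return skb;
}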
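The skb_tx_timestamp() kernel-doc above tells MAC drivers exactly where to hook in. A minimal sketch of a transmit routine follows; foo_queue_to_hw() and struct foo_priv are invented stand-ins for a real driver's descriptor-ring code, not an existing API.

#include <linux/netdevice.h>

/* Sketch only: foo_queue_to_hw() and struct foo_priv stand in for
 * a real driver's transmit path.
 */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	foo_queue_to_hw(priv, skb);	/* hand the frame to the MAC */

	/* After the hardware owns the buffer, but before the skb can
	 * be freed: clone for PHY timestamping, then take a software
	 * timestamp if the socket requested one.
	 */
	skb_tx_timestamp(skb);

	return NETDEV_TX_OK;
}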
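On the PHY side, the clone created by skb_clone_tx_timestamp() is handed back through skb_complete_tx_timestamp() once the hardware produces a timestamp. A sketch, assuming the nanosecond value comes from device-specific code:

#include <linux/ktime.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Sketch: deliver a hardware transmit timestamp for a clone held by
 * a PHY time stamping driver. Obtaining 'ns' is device specific.
 */
static void foo_deliver_tx_tstamp(struct sk_buff *clone, u64 ns)
{
	struct skb_shared_hwtstamps hwtstamps;

	memset(&hwtstamps, 0, sizeof(hwtstamps));
	hwtstamps.hwtstamp = ns_to_ktime(ns);

	/* Attach the timestamps and deliver the clone back to the
	 * originating socket.
	 */
	skb_complete_tx_timestamp(clone, &hwtstamps);
}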
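On receive, the matching hook is skb_defer_rx_timestamp(): a true return means the PHY layer took the buffer and will deliver it once time stamped, so the driver must not pass it up itself. A sketch of a receive completion path, with foo_rx() again illustrative only:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Sketch of a driver rx path cooperating with PHY time stamping. */
static void foo_rx(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);

	/* If a PHY time stamping driver claims the packet, it now owns
	 * the skb and will reinject it with timestamps later.
	 */
	if (skb_defer_rx_timestamp(skb))
		return;

	netif_rx(skb);
}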