Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r-- | include/linux/skbuff.h | 139
1 file changed, 91 insertions, 48 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 33370271b8b2..0e501714d47f 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -117,11 +117,11 @@ struct nf_conntrack {
 
 #ifdef CONFIG_BRIDGE_NETFILTER
 struct nf_bridge_info {
-	atomic_t use;
-	struct net_device *physindev;
-	struct net_device *physoutdev;
-	unsigned int mask;
-	unsigned long data[32 / sizeof(unsigned long)];
+	atomic_t		use;
+	unsigned int		mask;
+	struct net_device	*physindev;
+	struct net_device	*physoutdev;
+	unsigned long		data[32 / sizeof(unsigned long)];
 };
 #endif
 
@@ -238,11 +238,12 @@ enum {
 /*
  * The callback notifies userspace to release buffers when skb DMA is done in
  * lower device, the skb last reference should be 0 when calling this.
- * The desc is used to track userspace buffer index.
+ * The ctx field is used to track device context.
+ * The desc field is used to track userspace buffer index.
  */
 struct ubuf_info {
-	void (*callback)(void *);
-	void *arg;
+	void (*callback)(struct ubuf_info *);
+	void *ctx;
 	unsigned long desc;
 };
 
@@ -469,7 +470,8 @@ struct sk_buff {
 	__u8			wifi_acked_valid:1;
 	__u8			wifi_acked:1;
 	__u8			no_fcs:1;
-	/* 9/11 bit hole (depending on ndisc_nodetype presence) */
+	__u8			head_frag:1;
+	/* 8/10 bit hole (depending on ndisc_nodetype presence) */
 	kmemcheck_bitfield_end(flags2);
 
 #ifdef CONFIG_NET_DMA
@@ -481,6 +483,7 @@ struct sk_buff {
 	union {
 		__u32		mark;
 		__u32		dropcount;
+		__u32		avail_size;
 	};
 
 	sk_buff_data_t		transport_header;
@@ -558,9 +561,15 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
 extern void kfree_skb(struct sk_buff *skb);
 extern void consume_skb(struct sk_buff *skb);
 extern void __kfree_skb(struct sk_buff *skb);
+extern struct kmem_cache *skbuff_head_cache;
+
+extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
+extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+			     bool *fragstolen, int *delta_truesize);
+
 extern struct sk_buff *__alloc_skb(unsigned int size,
 				   gfp_t priority, int fclone, int node);
-extern struct sk_buff *build_skb(void *data);
+extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
 static inline struct sk_buff *alloc_skb(unsigned int size,
 					gfp_t priority)
 {
@@ -641,11 +650,21 @@ static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
 	return skb->head + skb->end;
 }
+
+static inline unsigned int skb_end_offset(const struct sk_buff *skb)
+{
+	return skb->end;
+}
 #else
 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
 	return skb->end;
 }
+
+static inline unsigned int skb_end_offset(const struct sk_buff *skb)
+{
+	return skb->end - skb->head;
+}
 #endif
 
 /* Internal */
@@ -879,10 +898,11 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
  */
 static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
 {
-	struct sk_buff *list = ((const struct sk_buff *)list_)->next;
-	if (list == (struct sk_buff *)list_)
-		list = NULL;
-	return list;
+	struct sk_buff *skb = list_->next;
+
+	if (skb == (struct sk_buff *)list_)
+		skb = NULL;
+	return skb;
 }
 
 /**
@@ -898,6 +918,7 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
 		const struct sk_buff_head *list_)
 {
 	struct sk_buff *next = skb->next;
+
 	if (next == (struct sk_buff *)list_)
 		next = NULL;
 	return next;
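The skb_peek() and skb_peek_next() rewrites above (skb_peek_tail() gets the same treatment below) are purely cosmetic: the misleading "list" variable becomes "skb" and the cast is dropped from the load. Behaviour is unchanged, so the usual rules still apply: NULL means the queue is empty, and the caller must hold the queue lock because the peeked buffer stays on the list. A minimal caller sketch; drain_queue() is a hypothetical helper, not part of this patch:

static void drain_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	/* skb_peek() yields NULL on an empty list, ending the loop */
	while ((skb = skb_peek(q)) != NULL) {
		__skb_unlink(skb, q);		/* still under q->lock */
		spin_unlock_irqrestore(&q->lock, flags);
		kfree_skb(skb);			/* free outside the lock */
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
}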
@@ -918,10 +939,12 @@
  */
 static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
 {
-	struct sk_buff *list = ((const struct sk_buff *)list_)->prev;
-	if (list == (struct sk_buff *)list_)
-		list = NULL;
-	return list;
+	struct sk_buff *skb = list_->prev;
+
+	if (skb == (struct sk_buff *)list_)
+		skb = NULL;
+	return skb;
+
 }
 
 /**
@@ -1018,7 +1041,7 @@ static inline void skb_queue_splice(const struct sk_buff_head *list,
 }
 
 /**
- *	skb_queue_splice - join two skb lists and reinitialise the emptied list
+ *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
@@ -1049,7 +1072,7 @@ static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
 }
 
 /**
- *	skb_queue_splice_tail - join two skb lists and reinitialise the emptied list
+ *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
@@ -1366,6 +1389,18 @@ static inline int skb_tailroom(const struct sk_buff *skb)
 }
 
 /**
+ *	skb_availroom - bytes at buffer end
+ *	@skb: buffer to check
+ *
+ *	Return the number of bytes of free space at the tail of an sk_buff
+ *	allocated by sk_stream_alloc()
+ */
+static inline int skb_availroom(const struct sk_buff *skb)
+{
+	return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
+}
+
+/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
@@ -1650,31 +1685,11 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
 		kfree_skb(skb);
 }
 
-/**
- *	__dev_alloc_skb - allocate an skbuff for receiving
- *	@length: length to allocate
- *	@gfp_mask: get_free_pages mask, passed to alloc_skb
- *
- *	Allocate a new &sk_buff and assign it a usage count of one. The
- *	buffer has unspecified headroom built in. Users should allocate
- *	the headroom they think they need without accounting for the
- *	built in space. The built in space is used for optimisations.
- *
- *	%NULL is returned if there is no free memory.
- */
-static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
-					      gfp_t gfp_mask)
-{
-	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
-	if (likely(skb))
-		skb_reserve(skb, NET_SKB_PAD);
-	return skb;
-}
-
-extern struct sk_buff *dev_alloc_skb(unsigned int length);
+extern void *netdev_alloc_frag(unsigned int fragsz);
 
 extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
-					  unsigned int length, gfp_t gfp_mask);
+					  unsigned int length,
+					  gfp_t gfp_mask);
 
 /**
 * netdev_alloc_skb - allocate an skbuff for rx on a specific device
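netdev_alloc_frag() hands out pieces of a per-CPU page, and the new frag_size argument to build_skb() tells it whether the buffer is such a fragment (non-zero, covering the whole allocation including the skb_shared_info tail) or a kmalloc() area (zero); the non-zero case is what sets the new skb->head_frag bit. A sketch of how an rx path might pair the two, assuming a copy-from-ring model; rx_build_skb() and its surroundings are hypothetical, not part of this diff:

static struct sk_buff *rx_build_skb(const void *hw_data, unsigned int len)
{
	/* total fragment: padded headroom + payload + shared info */
	unsigned int frag_size = SKB_DATA_ALIGN(NET_SKB_PAD + len) +
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb;
	void *buf;

	buf = netdev_alloc_frag(frag_size);
	if (!buf)
		return NULL;
	memcpy(buf + NET_SKB_PAD, hw_data, len);	/* copy out of the hw ring */
	skb = build_skb(buf, frag_size);		/* frag_size != 0: head_frag set */
	if (!skb) {
		put_page(virt_to_head_page(buf));	/* drop the fragment's page ref */
		return NULL;
	}
	skb_reserve(skb, NET_SKB_PAD);
	skb_put(skb, len);
	return skb;
}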
@@ -1690,11 +1705,25 @@ extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 *	allocates memory it can be called from an interrupt.
 */
 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
-		unsigned int length)
+					       unsigned int length)
 {
 	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
 }
 
+/* legacy helper around __netdev_alloc_skb() */
+static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+					      gfp_t gfp_mask)
+{
+	return __netdev_alloc_skb(NULL, length, gfp_mask);
+}
+
+/* legacy helper around netdev_alloc_skb() */
+static inline struct sk_buff *dev_alloc_skb(unsigned int length)
+{
+	return netdev_alloc_skb(NULL, length);
+}
+
+
 static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
 		unsigned int length, gfp_t gfp)
 {
@@ -1949,8 +1978,8 @@ static inline int skb_add_data(struct sk_buff *skb,
 	return -EFAULT;
 }
 
-static inline int skb_can_coalesce(struct sk_buff *skb, int i,
-				   const struct page *page, int off)
+static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
+				    const struct page *page, int off)
 {
 	if (i) {
 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1958,7 +1987,7 @@ static inline int skb_can_coalesce(struct sk_buff *skb, int i,
 		return page == skb_frag_page(frag) &&
 		       off == frag->page_offset + skb_frag_size(frag);
 	}
-	return 0;
+	return false;
 }
 
 static inline int __skb_linearize(struct sk_buff *skb)
@@ -2538,7 +2567,7 @@ static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
 		return false;
 
 	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
-	if (skb_end_pointer(skb) - skb->head < skb_size)
+	if (skb_end_offset(skb) < skb_size)
 		return false;
 
 	if (skb_shared(skb) || skb_cloned(skb))
@@ -2546,5 +2575,19 @@ static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
 
 	return true;
 }
+
+/**
+ * skb_head_is_locked - Determine if the skb->head is locked down
+ * @skb: skb to check
+ *
+ * The head on skbs build around a head frag can be removed if they are
+ * not cloned. This function returns true if the skb head is locked down
+ * due to either being allocated via kmalloc, or by being a clone with
+ * multiple references to the head.
+ */
+static inline bool skb_head_is_locked(const struct sk_buff *skb)
+{
+	return !skb->head_frag || skb_cloned(skb);
+}
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
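skb_try_coalesce() and kfree_skb_partial(), declared near the top of the diff, work together with skb_head_is_locked(): a head that is a page fragment (head_frag set) and not cloned is not "locked", so its data can be stolen into the destination skb as one more fragment instead of being copied or left as a separate buffer. A sketch of a receive-side caller; enqueue_or_coalesce() and its accounting are illustrative assumptions modelled on typical use, not code from this commit, and real callers do additional checks under the socket lock:

static bool enqueue_or_coalesce(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
	bool fragstolen;
	int delta;

	if (tail && skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
		/* @skb's payload now lives in @tail; charge the truesize
		 * growth, then release @skb cheaply: kfree_skb_partial()
		 * keeps the head when it was stolen into @tail as a frag.
		 */
		atomic_add(delta, &sk->sk_rmem_alloc);
		sk_mem_charge(sk, delta);
		kfree_skb_partial(skb, fragstolen);
		return true;
	}
	skb_queue_tail(&sk->sk_receive_queue, skb);
	return false;
}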