author     Paolo Abeni <pabeni@redhat.com>          2017-06-12 12:23:42 +0300
committer  David S. Miller <davem@davemloft.net>    2017-06-12 17:01:29 +0300
commit     0a463c78d25b9464b77311d9dda297550a2d6aa5
tree       87152851619730150299fe4acf3a7a0eff8a6f4b
parent     3889a803e1da9bd7cd10d6504bf281ee7e55dfd6
udp: avoid a cache miss on dequeue
Since UDP no longer uses sk->destructor, we can completely clear the
skb head state before enqueuing. Amend skb_release_head_state() and
use it for that.
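
For reference, here is the amended helper as it appears in the
net/core/skbuff.c hunk below. This is a sketch: the trailing netfilter
cleanup is abridged, and the comments are editorial additions.

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);		/* drop the route (dst_entry) reference */
	secpath_reset(skb);		/* drop the xfrm secpath; replaces the
					 * open-coded CONFIG_XFRM block */
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);	/* e.g. socket accounting callbacks */
	}
	/* ... the full function also drops conntrack / bridge-netfilter state */
}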
All head states share a single cacheline, which is not normally
accessed on dequeue. We can avoid touching that cacheline entirely by
implementing, and using in the UDP code, a specialized skb free helper
that ignores the skb head state.
This saves a cacheline miss at skb deallocation time.
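
To make the saving concrete, here is the new helper added below in
net/core/skbuff.c, with editorial annotations. Unlike consume_skb(),
which frees via __kfree_skb() and thus calls skb_release_head_state(),
this variant never touches the head-state cacheline at all:

void consume_stateless_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))		/* drop one reference; continue only
					 * if this was the last one */
		return;

	trace_consume_skb(skb);
	if (likely(skb->head))
		skb_release_data(skb);	/* free the payload and frags... */
	kfree_skbmem(skb);		/* ...and then the sk_buff itself */
}

The head state (dst, secpath, destructor, ...) was already cleared at
enqueue time, so skipping skb_release_head_state() here is safe.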
v1 -> v2:
replaced secpath_reset() with skb_release_head_state()
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/linux/skbuff.h |  2
-rw-r--r--  net/core/skbuff.c      | 24
-rw-r--r--  net/ipv4/udp.c         |  6
3 files changed, 27 insertions(+), 5 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index decce3655a48..d66d4feaac86 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -880,10 +880,12 @@ static inline bool skb_unref(struct sk_buff *skb)
 	return true;
 }
 
+void skb_release_head_state(struct sk_buff *skb);
 void kfree_skb(struct sk_buff *skb);
 void kfree_skb_list(struct sk_buff *segs);
 void skb_tx_error(struct sk_buff *skb);
 void consume_skb(struct sk_buff *skb);
+void consume_stateless_skb(struct sk_buff *skb);
 void __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 747263c49838..304602784c3b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -643,12 +643,10 @@ fastpath:
 	kmem_cache_free(skbuff_fclone_cache, fclones);
 }
 
-static void skb_release_head_state(struct sk_buff *skb)
+void skb_release_head_state(struct sk_buff *skb)
 {
 	skb_dst_drop(skb);
-#ifdef CONFIG_XFRM
-	secpath_put(skb->sp);
-#endif
+	secpath_reset(skb);
 	if (skb->destructor) {
 		WARN_ON(in_irq());
 		skb->destructor(skb);
@@ -751,6 +749,24 @@ void consume_skb(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(consume_skb);
 
+/**
+ * consume_stateless_skb - free an skbuff, assuming it is stateless
+ * @skb: buffer to free
+ *
+ * Works like consume_skb(), but this variant assumes that all the head
+ * states have been already dropped.
+ */
+void consume_stateless_skb(struct sk_buff *skb)
+{
+	if (!skb_unref(skb))
+		return;
+
+	trace_consume_skb(skb);
+	if (likely(skb->head))
+		skb_release_data(skb);
+	kfree_skbmem(skb);
+}
+
 void __kfree_skb_flush(void)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index fdcb7437cc15..d8b265f1a33b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1359,7 +1359,8 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
 		sk_peek_offset_bwd(sk, len);
 		unlock_sock_fast(sk, slow);
 	}
-	consume_skb(skb);
+
+	consume_stateless_skb(skb);
 }
 EXPORT_SYMBOL_GPL(skb_consume_udp);
 
@@ -1739,6 +1740,9 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		sk_mark_napi_id_once(sk, skb);
 	}
 
+	/* clear all pending head states while they are hot in the cache */
+	skb_release_head_state(skb);
+
 	rc = __udp_enqueue_schedule_skb(sk, skb);
 	if (rc < 0) {
 		int is_udplite = IS_UDPLITE(sk);
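
Putting the hunks together, the reworked UDP receive path looks roughly
like this. Both functions are heavily abridged sketches of the code
above; the omitted bookkeeping is marked in the comments.

/* Enqueue, softirq context: the head state was just written, so its
 * cacheline is still hot -- clear it before the skb sits in the queue. */
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	/* ... rx-queue and NAPI id bookkeeping omitted ... */
	skb_release_head_state(skb);			/* cleared while hot */
	return __udp_enqueue_schedule_skb(sk, skb);	/* error paths omitted */
}

/* Dequeue, recvmsg() context, possibly much later: the skb is known to
 * be stateless, so the free path skips the now-cold cacheline. */
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	/* ... rmem accounting and peek-offset handling omitted ... */
	consume_stateless_skb(skb);	/* no head-state cacheline miss */
}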