path: root/include/net/inetpeer.h
author      Eric Dumazet <edumazet@google.com>                    2014-06-02 16:26:03 +0400
committer   Greg Kroah-Hartman <gregkh@linuxfoundation.org>       2014-08-14 04:42:35 +0400
commit      ad52eef552c7896ec6024ee72fc126167fe5c4e2 (patch)
tree        ad82cf940ab103a6b51260f681b22d21f6ecdb2c /include/net/inetpeer.h
parent      0a9d91dca3b9f797f2fc615486c12afa59f19a3b (diff)
download    linux-ad52eef552c7896ec6024ee72fc126167fe5c4e2.tar.xz
inetpeer: get rid of ip_id_count
[ Upstream commit 73f156a6e8c1074ac6327e0abd1169e95eb66463 ]

Ideally, we would generate the IP ID using a per-destination-IP generator. Linux kernels used the inet_peer cache for this purpose, but this had a huge cost on servers disabling MTU discovery:

1) each inet_peer struct consumes 192 bytes

2) the inetpeer cache uses a binary tree of inet_peer structs, with a nominal size of ~66000 elements under load.

3) lookups in this tree hit a lot of cache lines, as the tree depth is about 20.

4) If the server deals with many TCP flows, there is a high probability of not finding the inet_peer, allocating a fresh one, and inserting it into the tree with the same initial ip_id_count (cf. secure_ip_id()).

5) We garbage collect inet_peer aggressively.

IP ID generation does not have to be 'perfect'. The goal is to avoid duplicates over a short period of time, so that reassembly units have a chance to complete reassembly of fragments belonging to one message before receiving other fragments with a recycled ID.

We simply use an array of generators and a Jenkins hash with the dst IP as the key.

ipv6_select_ident() is put back into net/ipv6/ip6_output.c where it belongs (it is only used from this file).

secure_ip_id() and secure_ipv6_id() are no longer needed.

Rename ip_select_ident_more() to ip_select_ident_segs() to avoid an unnecessary decrement/increment of the number of segments.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
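To make the scheme described above concrete, here is a minimal standalone userspace sketch of the idea: a fixed array of atomic generators indexed by a hash of the destination address, with each packet reserving as many IDs as it has segments. This is not the kernel code; the bucket count, the helper names toy_hash() and select_ip_ident(), and the hash function itself are placeholders invented for this illustration (the kernel uses a Jenkins hash).

/*
 * Standalone sketch only -- NOT the kernel implementation. ID_BUCKETS,
 * ip_id_bucket, toy_hash() and select_ip_ident() are invented names.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define ID_BUCKETS 2048u

static atomic_uint ip_id_bucket[ID_BUCKETS];    /* the array of generators */

/* Toy mixer standing in for a Jenkins hash keyed on the destination IP. */
static uint32_t toy_hash(uint32_t daddr, uint32_t seed)
{
    uint32_t h = daddr ^ seed;

    h ^= h >> 16;
    h *= 0x45d9f3bu;
    h ^= h >> 16;
    return h;
}

/*
 * Reserve 'segs' consecutive IDs from the generator selected by the
 * destination hash and return the first one; atomic_fetch_add() returns
 * the old value, mirroring the atomic_add_return(segs, ...) - segs idiom.
 */
static uint16_t select_ip_ident(uint32_t daddr, uint32_t seed, int segs)
{
    atomic_uint *gen = &ip_id_bucket[toy_hash(daddr, seed) % ID_BUCKETS];

    return (uint16_t)atomic_fetch_add(gen, (unsigned int)segs);
}

int main(void)
{
    uint32_t daddr = 0xc0a80001u;               /* 192.168.0.1 */
    uint16_t id;

    id = select_ip_ident(daddr, 0x1234u, 3);    /* 3-segment GSO packet */
    printf("first id of burst: %u\n", (unsigned)id);

    id = select_ip_ident(daddr, 0x1234u, 1);    /* next single packet */
    printf("next id: %u\n", (unsigned)id);      /* previous id + 3 */
    return 0;
}

Note that select_ip_ident() takes the segment count directly, which mirrors the ip_select_ident_segs() rename described above; the removed inet_getid() helper, visible in the diff below, instead took segs - 1 and re-incremented it internally. Collisions between different destinations that land in the same bucket are acceptable here: as the commit message says, the only goal is to make ID reuse for a given destination unlikely within a reassembly window.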
Diffstat (limited to 'include/net/inetpeer.h')
-rw-r--r--   include/net/inetpeer.h   14
1 file changed, 2 insertions(+), 12 deletions(-)
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 2d643649f0f8..168d30dfe807 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -46,13 +46,12 @@ struct inet_peer {
         };
         /*
          * Once inet_peer is queued for deletion (refcnt == -1), following fields
-         * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
+         * are not available: rid, tcp_ts, tcp_ts_stamp
          * We can share memory with rcu_head to help keep inet_peer small.
          */
         union {
                 struct {
                         atomic_t        rid;            /* Frag reception counter */
-                        atomic_t        ip_id_count;    /* IP ID for the next packet */
                         __u32           tcp_ts;
                         __u32           tcp_ts_stamp;
                 };
@@ -102,7 +101,7 @@ extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
 extern void inetpeer_invalidate_tree(int family);

 /*
- * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
+ * temporary check to make sure we dont access rid, tcp_ts,
  * tcp_ts_stamp if no refcount is taken on inet_peer
  */
 static inline void inet_peer_refcheck(const struct inet_peer *p)
@@ -110,13 +109,4 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
 {
         WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
 }
-
-/* can be called with or without local BH being disabled */
-static inline int inet_getid(struct inet_peer *p, int more)
-{
-        more++;
-        inet_peer_refcheck(p);
-        return atomic_add_return(more, &p->ip_id_count) - more;
-}
-
 #endif /* _NET_INETPEER_H */