author     Patrick McHardy <kaber@trash.net>  2005-08-10 07:02:13 +0400
committer  David S. Miller <davem@sunset.davemloft.net>  2005-08-30 02:38:54 +0400
commit     a86888b925299330053d20e0eba03ac4d2648c4b (patch)
tree       27c2d22d98a9eed22749df1a8d32f72e1b5a2468 /net/ipv4
parent     a55ebcc4c4532107ad9eee1c9bb698ab5f12c00f (diff)
download   linux-a86888b925299330053d20e0eba03ac4d2648c4b.tar.xz
[NETFILTER]: Fix multiple problems with the conntrack event cache
Refcnt underflow: the reference count is decremented when a conntrack entry is removed from the hash, but it is not incremented when entering new entries.

Missing protection of process context against softirq context: all cache operations need to locally disable softirqs to avoid races. Additionally, the event cache can't be initialized when a packet enters the conntrack code, but needs to be initialized whenever we cache an event and the stored conntrack entry doesn't match the current one.

Incorrect flushing of the event cache in ip_ct_iterate_cleanup: without real locking we can't flush the cache for different CPUs without incurring races. The cache for different CPUs can only be flushed when no packets are going through the code. ip_ct_iterate_cleanup doesn't need to drop all references, so flushing is moved to the cleanup path.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
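The fix hinges on one pattern: every access to the per-CPU event cache from process context runs with softirqs locally disabled, and the cache holds a reference on the conntrack entry it points to. The caching helper on the header side of this change (not part of the net/ipv4 diffstat shown below) pairs with the new __ip_ct_event_cache_init(); a condensed sketch, assuming the declarations from ip_conntrack.h of this kernel version:

static inline void
ip_conntrack_event_cache(enum ip_conntrack_events event,
			 const struct sk_buff *skb)
{
	struct ip_conntrack *ct = (struct ip_conntrack *)skb->nfct;
	struct ip_conntrack_ecache *ecache;

	/* softirqs must be off: the cache is per-CPU and is also
	 * written from packet processing in softirq context */
	local_bh_disable();
	ecache = &__get_cpu_var(ip_conntrack_ecache);
	/* cache holds a different conntrack: deliver its old events
	 * and re-initialize, taking a reference on the new entry */
	if (ct != ecache->ct)
		__ip_ct_event_cache_init(ct);
	ecache->events |= event;
	local_bh_enable();
}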
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_core.c        105
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_standalone.c     3
2 files changed, 39 insertions, 69 deletions
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index d9fddae8d787..5c3f16eae2d8 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -85,73 +85,62 @@ struct notifier_block *ip_conntrack_expect_chain;
DEFINE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);
-static inline void __deliver_cached_events(struct ip_conntrack_ecache *ecache)
+/* deliver cached events and clear cache entry - must be called with locally
+ * disabled softirqs */
+static inline void
+__ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache)
{
+ DEBUGP("ecache: delivering events for %p\n", ecache->ct);
if (is_confirmed(ecache->ct) && !is_dying(ecache->ct) && ecache->events)
notifier_call_chain(&ip_conntrack_chain, ecache->events,
ecache->ct);
ecache->events = 0;
-}
-
-void __ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache)
-{
- __deliver_cached_events(ecache);
+ ip_conntrack_put(ecache->ct);
+ ecache->ct = NULL;
}
/* Deliver all cached events for a particular conntrack. This is called
* by code prior to async packet handling or freeing the skb */
-void
-ip_conntrack_deliver_cached_events_for(const struct ip_conntrack *ct)
+void ip_ct_deliver_cached_events(const struct ip_conntrack *ct)
{
- struct ip_conntrack_ecache *ecache =
- &__get_cpu_var(ip_conntrack_ecache);
-
- if (!ct)
- return;
+ struct ip_conntrack_ecache *ecache;
+
+ local_bh_disable();
+ ecache = &__get_cpu_var(ip_conntrack_ecache);
+ if (ecache->ct == ct)
+ __ip_ct_deliver_cached_events(ecache);
+ local_bh_enable();
+}
- if (ecache->ct == ct) {
- DEBUGP("ecache: delivering event for %p\n", ct);
- __deliver_cached_events(ecache);
- } else {
- if (net_ratelimit())
- printk(KERN_WARNING "ecache: want to deliver for %p, "
- "but cache has %p\n", ct, ecache->ct);
- }
+void __ip_ct_event_cache_init(struct ip_conntrack *ct)
+{
+ struct ip_conntrack_ecache *ecache;
- /* signalize that events have already been delivered */
- ecache->ct = NULL;
+ /* take care of delivering potentially old events */
+ ecache = &__get_cpu_var(ip_conntrack_ecache);
+ BUG_ON(ecache->ct == ct);
+ if (ecache->ct)
+ __ip_ct_deliver_cached_events(ecache);
+ /* initialize for this conntrack/packet */
+ ecache->ct = ct;
+ nf_conntrack_get(&ct->ct_general);
}
-/* Deliver cached events for old pending events, if current conntrack != old */
-void ip_conntrack_event_cache_init(const struct sk_buff *skb)
+/* flush the event cache - touches other CPU's data and must not be called while
+ * packets are still passing through the code */
+static void ip_ct_event_cache_flush(void)
{
- struct ip_conntrack *ct = (struct ip_conntrack *) skb->nfct;
- struct ip_conntrack_ecache *ecache =
- &__get_cpu_var(ip_conntrack_ecache);
+ struct ip_conntrack_ecache *ecache;
+ int cpu;
- /* take care of delivering potentially old events */
- if (ecache->ct != ct) {
- enum ip_conntrack_info ctinfo;
- /* we have to check, since at startup the cache is NULL */
- if (likely(ecache->ct)) {
- DEBUGP("ecache: entered for different conntrack: "
- "ecache->ct=%p, skb->nfct=%p. delivering "
- "events\n", ecache->ct, ct);
- __deliver_cached_events(ecache);
+ for_each_cpu(cpu) {
+ ecache = &per_cpu(ip_conntrack_ecache, cpu);
+ if (ecache->ct)
ip_conntrack_put(ecache->ct);
- } else {
- DEBUGP("ecache: entered for conntrack %p, "
- "cache was clean before\n", ct);
- }
-
- /* initialize for this conntrack/packet */
- ecache->ct = ip_conntrack_get(skb, &ctinfo);
- /* ecache->events cleared by __deliver_cached_devents() */
- } else {
- DEBUGP("ecache: re-entered for conntrack %p.\n", ct);
}
}
-
+#else
+static inline void ip_ct_event_cache_flush(void) {}
#endif /* CONFIG_IP_NF_CONNTRACK_EVENTS */
DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
@@ -878,8 +867,6 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
IP_NF_ASSERT((*pskb)->nfct);
- ip_conntrack_event_cache_init(*pskb);
-
ret = proto->packet(ct, *pskb, ctinfo);
if (ret < 0) {
/* Invalid: inverse of the return code tells
@@ -1278,23 +1265,6 @@ ip_ct_iterate_cleanup(int (*iter)(struct ip_conntrack *i, void *), void *data)
ip_conntrack_put(ct);
}
-
-#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
- {
- /* we need to deliver all cached events in order to drop
- * the reference counts */
- int cpu;
- for_each_cpu(cpu) {
- struct ip_conntrack_ecache *ecache =
- &per_cpu(ip_conntrack_ecache, cpu);
- if (ecache->ct) {
- __ip_ct_deliver_cached_events(ecache);
- ip_conntrack_put(ecache->ct);
- ecache->ct = NULL;
- }
- }
- }
-#endif
}
/* Fast function for those who don't want to parse /proc (and I don't
@@ -1381,6 +1351,7 @@ void ip_conntrack_flush()
delete... */
synchronize_net();
+ ip_ct_event_cache_flush();
i_see_dead_people:
ip_ct_iterate_cleanup(kill_all, NULL);
if (atomic_read(&ip_conntrack_count) != 0) {
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index ca97c3ac2f2a..ee5895afd0c3 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -401,7 +401,6 @@ static unsigned int ip_confirm(unsigned int hooknum,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- ip_conntrack_event_cache_init(*pskb);
/* We've seen it coming out the other side: confirm it */
return ip_conntrack_confirm(pskb);
}
@@ -419,7 +418,6 @@ static unsigned int ip_conntrack_help(unsigned int hooknum,
ct = ip_conntrack_get(*pskb, &ctinfo);
if (ct && ct->helper) {
unsigned int ret;
- ip_conntrack_event_cache_init(*pskb);
ret = ct->helper->help(pskb, ct, ctinfo);
if (ret != NF_ACCEPT)
return ret;
@@ -978,6 +976,7 @@ EXPORT_SYMBOL_GPL(ip_conntrack_chain);
EXPORT_SYMBOL_GPL(ip_conntrack_expect_chain);
EXPORT_SYMBOL_GPL(ip_conntrack_register_notifier);
EXPORT_SYMBOL_GPL(ip_conntrack_unregister_notifier);
+EXPORT_SYMBOL_GPL(__ip_ct_event_cache_init);
EXPORT_PER_CPU_SYMBOL_GPL(ip_conntrack_ecache);
#endif
EXPORT_SYMBOL(ip_conntrack_protocol_register);
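For completeness: cached events are ultimately delivered through ip_conntrack_chain via notifier_call_chain(), so consumers attach with the exported ip_conntrack_register_notifier(). A hypothetical listener sketch (the names my_ct_event and my_ct_notifier and the callback body are illustrative only, not part of this patch):

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/netfilter_ipv4/ip_conntrack.h>

/* hypothetical consumer: receives the event mask that
 * __ip_ct_deliver_cached_events() pushes through ip_conntrack_chain */
static int my_ct_event(struct notifier_block *this,
		       unsigned long events, void *ptr)
{
	struct ip_conntrack *ct = ptr;

	if (events & IPCT_NEW)
		printk(KERN_DEBUG "ecache: new conntrack %p\n", ct);
	return NOTIFY_DONE;
}

static struct notifier_block my_ct_notifier = {
	.notifier_call	= my_ct_event,
};

/* module init/exit would call:
 *	ip_conntrack_register_notifier(&my_ct_notifier);
 *	ip_conntrack_unregister_notifier(&my_ct_notifier);
 */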