author	Pablo Neira Ayuso <pablo@netfilter.org>	2006-09-20 12:01:06 -0700
committer	David S. Miller <davem@sunset.davemloft.net>	2006-09-22 15:19:54 -0700
commit	5251e2d2125407bbff0c39394a4011be9ed8b5d0 (patch)
tree	3dda0aeb90d80a2ddd0e7a4215bfe9eaa8209033 /net/ipv4
parent	01f348484dd8509254d045e3ad49029716eca6a1 (diff)
[NETFILTER]: conntrack: fix race condition in early_drop
On SMP environments the maximum number of conntracks can be exceeded under heavy stress due to a race condition:

	CPU A			CPU B
	atomic_read()		...
	early_drop()		...
	...			atomic_read()
	allocate conntrack	allocate conntrack
	atomic_inc()		atomic_inc()

This patch moves the counter increment before the early drop stage.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
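For illustration, here is a minimal user-space sketch of the pattern the patch adopts, using C11 atomics in place of the kernel's atomic_t; the names MAX_ENTRIES, try_alloc_racy() and try_alloc() are hypothetical stand-ins, not kernel API. The slot is reserved by incrementing first, the limit test becomes '>' instead of '>=' (the reservation is already counted), and every failure path hands the reservation back with a decrement:

/* Sketch only: C11 atomics stand in for the kernel's atomic_t. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_ENTRIES 4			/* hypothetical table limit */

static atomic_int entry_count;		/* plays the role of ip_conntrack_count */

/* Racy check-then-increment: two CPUs can both pass the limit test
 * before either increments, overshooting MAX_ENTRIES. */
static bool try_alloc_racy(void)
{
	if (atomic_load(&entry_count) >= MAX_ENTRIES)
		return false;			/* table full */
	atomic_fetch_add(&entry_count, 1);	/* too late: race window above */
	return true;
}

/* Fixed increment-then-check, as in the patch: reserve first, compare
 * with '>', and roll the reservation back on every failure path. */
static bool try_alloc(void)
{
	atomic_fetch_add(&entry_count, 1);	/* reserve before checking */
	if (atomic_load(&entry_count) > MAX_ENTRIES) {
		atomic_fetch_sub(&entry_count, 1);	/* give the slot back */
		return false;				/* table full */
	}
	/* ... allocate the entry; a failed allocation must also decrement ... */
	return true;
}

int main(void)
{
	/* Single-threaded demo of the limit behaviour; the race itself
	 * would need two threads hitting try_alloc_racy() concurrently. */
	for (int i = 0; i < 6; i++)
		printf("racy  %d: %s\n", i, try_alloc_racy() ? "ok" : "full");
	atomic_store(&entry_count, 0);
	for (int i = 0; i < 6; i++)
		printf("fixed %d: %s (count=%d)\n", i,
		       try_alloc() ? "ok" : "full", atomic_load(&entry_count));
	return 0;
}

Note how the fixed variant mirrors the diff below: the same atomic_dec() appears both when early_drop() fails and when kmem_cache_alloc() returns NULL.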
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/netfilter/ip_conntrack_core.c	9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 2568d480e9a..422a662194c 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -622,11 +622,15 @@ struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
 		ip_conntrack_hash_rnd_initted = 1;
 	}
 
+	/* We don't want any race condition at early drop stage */
+	atomic_inc(&ip_conntrack_count);
+
 	if (ip_conntrack_max
-	    && atomic_read(&ip_conntrack_count) >= ip_conntrack_max) {
+	    && atomic_read(&ip_conntrack_count) > ip_conntrack_max) {
 		unsigned int hash = hash_conntrack(orig);
 		/* Try dropping from this hash chain. */
 		if (!early_drop(&ip_conntrack_hash[hash])) {
+			atomic_dec(&ip_conntrack_count);
 			if (net_ratelimit())
 				printk(KERN_WARNING
 				       "ip_conntrack: table full, dropping"
@@ -638,6 +642,7 @@ struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
 	conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
 	if (!conntrack) {
 		DEBUGP("Can't allocate conntrack.\n");
+		atomic_dec(&ip_conntrack_count);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -651,8 +656,6 @@ struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
 	conntrack->timeout.data = (unsigned long)conntrack;
 	conntrack->timeout.function = death_by_timeout;
 
-	atomic_inc(&ip_conntrack_count);
-
 	return conntrack;
 }