| author    | Pablo Neira Ayuso <pablo@netfilter.org>                 | 2006-09-20 12:01:06 -0700 |
|-----------|----------------------------------------------------------|---------------------------|
| committer | David S. Miller <davem@sunset.davemloft.net>            | 2006-09-22 15:19:54 -0700 |
| commit    | 5251e2d2125407bbff0c39394a4011be9ed8b5d0 (patch)        |                           |
| tree      | 3dda0aeb90d80a2ddd0e7a4215bfe9eaa8209033 /net/netfilter |                           |
| parent    | 01f348484dd8509254d045e3ad49029716eca6a1 (diff)         |                           |
[NETFILTER]: conntrack: fix race condition in early_drop
On SMP systems the maximum number of conntracks can be exceeded under
heavy load because of a race condition:
    CPU A                     CPU B
    atomic_read()             ...
    early_drop()              ...
    ...                       atomic_read()
    allocate conntrack        allocate conntrack
    atomic_inc()              atomic_inc()
This patch moves the counter increment before the early drop stage.
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
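The pattern behind the fix can be sketched in ordinary user-space C. This is a minimal illustration under assumed names, not the kernel code: `count`, `max`, `try_alloc_racy()` and `try_alloc_fixed()` are made up for the example, C11 atomics stand in for the kernel's atomic_t helpers, and the early_drop() retry that the real patch performs before giving up is omitted for brevity.

```c
/* Minimal sketch of the race and the fix; compile with: cc -std=c11 sketch.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int count;	/* stands in for nf_conntrack_count */
static const int max = 1;	/* stands in for nf_conntrack_max */

/* Racy variant: check first, increment later.  Two CPUs can both pass
 * the limit check in the window between the read and the increment. */
static bool try_alloc_racy(void)
{
	if (atomic_load(&count) >= max)
		return false;		/* table "full" */
	/* another CPU may run the same check here */
	atomic_fetch_add(&count, 1);
	return true;
}

/* Fixed variant: reserve the slot up front, give it back on failure.
 * The kernel patch additionally tries early_drop() before giving up;
 * that step is left out here. */
static bool try_alloc_fixed(void)
{
	if (atomic_fetch_add(&count, 1) + 1 > max) {
		atomic_fetch_sub(&count, 1);	/* the new atomic_dec() path */
		return false;
	}
	return true;
}

int main(void)
{
	bool a, b;

	atomic_store(&count, 0);
	a = try_alloc_fixed();		/* succeeds, count -> 1 */
	b = try_alloc_fixed();		/* fails: limit already reached */
	printf("fixed: first=%d second=%d count=%d\n", a, b,
	       atomic_load(&count));

	atomic_store(&count, 0);
	printf("racy (looks fine single-threaded): %d\n", try_alloc_racy());
	return 0;
}
```

Reserving the slot first makes the limit check and the reservation a single atomic step; the worst case is a transient over-reservation that is immediately undone, instead of two CPUs both slipping past the check.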
Diffstat (limited to 'net/netfilter')
-rw-r--r-- | net/netfilter/nf_conntrack_core.c | 10 |
1 file changed, 8 insertions, 2 deletions
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 927137b8b3b..adeafa2cc33 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -848,11 +848,15 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
 		nf_conntrack_hash_rnd_initted = 1;
 	}
 
+	/* We don't want any race condition at early drop stage */
+	atomic_inc(&nf_conntrack_count);
+
 	if (nf_conntrack_max
-	    && atomic_read(&nf_conntrack_count) >= nf_conntrack_max) {
+	    && atomic_read(&nf_conntrack_count) > nf_conntrack_max) {
 		unsigned int hash = hash_conntrack(orig);
 		/* Try dropping from this hash chain. */
 		if (!early_drop(&nf_conntrack_hash[hash])) {
+			atomic_dec(&nf_conntrack_count);
 			if (net_ratelimit())
 				printk(KERN_WARNING
 				       "nf_conntrack: table full, dropping"
@@ -903,10 +907,12 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
 	init_timer(&conntrack->timeout);
 	conntrack->timeout.data = (unsigned long)conntrack;
 	conntrack->timeout.function = death_by_timeout;
+	read_unlock_bh(&nf_ct_cache_lock);
 
-	atomic_inc(&nf_conntrack_count);
+	return conntrack;
 
 out:
 	read_unlock_bh(&nf_ct_cache_lock);
+	atomic_dec(&nf_conntrack_count);
 	return conntrack;
 }