author     Pekka Enberg <penberg@cs.helsinki.fi>    2006-06-23 02:03:05 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-06-23 07:42:46 -0700
commit     729bd0b74ce9ac6c829109052fcd565f5c366ca5 (patch)
tree       cfb2660d6107251f63f7480e193aca7c280ca5ab
parent     4da5eda0dca9730f59f391230304526ab4bffec7 (diff)
[PATCH] slab: extract cache_free_alien from __cache_free
Move alien object freeing to cache_free_alien() to reduce #ifdef
clutter in __cache_free().

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 mm/slab.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++-----------------------------------
 1 file changed, 42 insertions(+), 35 deletions(-)
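Before the diff itself, it is worth noting the shape of the change: a
CONFIG-guarded block is hoisted out of a hot function into an inline
helper whose return value tells the caller whether it handled the free,
paired with a no-op stub for the !CONFIG_NUMA build so the call site
compiles unchanged either way. Below is a minimal, hedged sketch of the
same pattern in plain C; CONFIG_SPECIAL, free_if_remote(), and do_free()
are hypothetical names for illustration, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

#ifdef CONFIG_SPECIAL
/* Hypothetical predicate standing in for the slabp->nodeid check. */
static bool object_is_remote(int obj)
{
        return obj < 0;
}

/* Special path: returns true when the object was consumed here. */
static inline bool free_if_remote(int obj)
{
        if (!object_is_remote(obj))
                return false;
        printf("freed %d on its home node\n", obj);
        return true;
}
#else
/* Option compiled out: the helper collapses to a constant. */
static inline bool free_if_remote(int obj)
{
        (void)obj;
        return false;
}
#endif

static void do_free(int obj)
{
        /* The caller reads straight through, with no #ifdef clutter. */
        if (free_if_remote(obj))
                return;
        printf("freed %d locally\n", obj);
}

int main(void)
{
        do_free(7);     /* local path */
        do_free(-3);    /* remote path when CONFIG_SPECIAL is set */
        return 0;
}

Compile with -DCONFIG_SPECIAL to exercise the special path; without it
the stub makes the branch in do_free() disappear at compile time.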
diff --git a/mm/slab.c b/mm/slab.c
index f1b644eb39d..bf05ea900ce 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1024,6 +1024,40 @@ static void drain_alien_cache(struct kmem_cache *cachep,
                 }
         }
 }
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+        struct slab *slabp = virt_to_slab(objp);
+        int nodeid = slabp->nodeid;
+        struct kmem_list3 *l3;
+        struct array_cache *alien = NULL;
+
+        /*
+         * Make sure we are not freeing an object from another node to the
+         * array cache on this cpu.
+         */
+        if (likely(slabp->nodeid == numa_node_id()))
+                return 0;
+
+        l3 = cachep->nodelists[numa_node_id()];
+        STATS_INC_NODEFREES(cachep);
+        if (l3->alien && l3->alien[nodeid]) {
+                alien = l3->alien[nodeid];
+                spin_lock(&alien->lock);
+                if (unlikely(alien->avail == alien->limit)) {
+                        STATS_INC_ACOVERFLOW(cachep);
+                        __drain_alien_cache(cachep, alien, nodeid);
+                }
+                alien->entry[alien->avail++] = objp;
+                spin_unlock(&alien->lock);
+        } else {
+                spin_lock(&(cachep->nodelists[nodeid])->list_lock);
+                free_block(cachep, &objp, 1, nodeid);
+                spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
+        }
+        return 1;
+}
+
 #else
 
 #define drain_alien_cache(cachep, alien) do { } while (0)
@@ -1038,6 +1072,11 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 {
 }
 
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+        return 0;
+}
+
 #endif
 
 static int cpuup_callback(struct notifier_block *nfb,
@@ -3087,41 +3126,9 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
         check_irq_off();
         objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
-        /* Make sure we are not freeing a object from another
-         * node to the array cache on this cpu.
-         */
-#ifdef CONFIG_NUMA
-        {
-                struct slab *slabp;
-                slabp = virt_to_slab(objp);
-                if (unlikely(slabp->nodeid != numa_node_id())) {
-                        struct array_cache *alien = NULL;
-                        int nodeid = slabp->nodeid;
-                        struct kmem_list3 *l3;
-
-                        l3 = cachep->nodelists[numa_node_id()];
-                        STATS_INC_NODEFREES(cachep);
-                        if (l3->alien && l3->alien[nodeid]) {
-                                alien = l3->alien[nodeid];
-                                spin_lock(&alien->lock);
-                                if (unlikely(alien->avail == alien->limit)) {
-                                        STATS_INC_ACOVERFLOW(cachep);
-                                        __drain_alien_cache(cachep,
-                                                            alien, nodeid);
-                                }
-                                alien->entry[alien->avail++] = objp;
-                                spin_unlock(&alien->lock);
-                        } else {
-                                spin_lock(&(cachep->nodelists[nodeid])->
-                                          list_lock);
-                                free_block(cachep, &objp, 1, nodeid);
-                                spin_unlock(&(cachep->nodelists[nodeid])->
-                                            list_lock);
-                        }
-                        return;
-                }
-        }
-#endif
+        if (cache_free_alien(cachep, objp))
+                return;
+
         if (likely(ac->avail < ac->limit)) {
                 STATS_INC_FREEHIT(cachep);
                 ac->entry[ac->avail++] = objp;
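A closing note on the NUMA branch above: cache_free_alien() batches
frees destined for a remote node in a small per-node array
(alien->entry[]) and only drains it when it fills, which is what the
alien->avail == alien->limit test guards. Here is a standalone, hedged
sketch of that overflow discipline; struct batch, batch_free(), and
drain() are hypothetical names, not kernel code.

#include <stddef.h>
#include <stdio.h>

#define BATCH_LIMIT 4   /* stand-in for alien->limit */

struct batch {
        void *entry[BATCH_LIMIT];
        size_t avail;
};

/* Stand-in for __drain_alien_cache(): flush everything queued so far. */
static void drain(struct batch *b)
{
        printf("draining %zu object(s)\n", b->avail);
        b->avail = 0;
}

/* Mirrors the locked body above: drain on overflow, then enqueue. */
static void batch_free(struct batch *b, void *objp)
{
        if (b->avail == BATCH_LIMIT)
                drain(b);
        b->entry[b->avail++] = objp;
}

int main(void)
{
        struct batch b = { .avail = 0 };
        int objs[6];

        for (int i = 0; i < 6; i++)
                batch_free(&b, &objs[i]);
        drain(&b);      /* final flush: prints the remaining 2 */
        return 0;
}

The point of the batch, as in the kernel code, is amortization: the
expensive cross-node work happens once per full batch rather than once
per freed object.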