From 231367fd9bccbb36309ab5bf5012e11a84231031 Mon Sep 17 00:00:00 2001
From: Adrian Bunk
Date: Tue, 22 Jul 2008 20:21:16 +0300
Subject: mm: unexport ksize

This patch removes the obsolete and no longer used exports of ksize.

Signed-off-by: Adrian Bunk
Signed-off-by: Pekka Enberg
---
 mm/slab.c | 1 -
 mm/slob.c | 1 -
 mm/slub.c | 1 -
 3 files changed, 3 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 918f04f7fef..e76eee46688 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4472,4 +4472,3 @@ size_t ksize(const void *objp)
 
 	return obj_size(virt_to_cache(objp));
 }
-EXPORT_SYMBOL(ksize);
diff --git a/mm/slob.c b/mm/slob.c
index d8fbd4d1bfa..4c82dd41f32 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -519,7 +519,6 @@ size_t ksize(const void *block)
 	else
 		return sp->page.private;
 }
-EXPORT_SYMBOL(ksize);
 
 struct kmem_cache {
 	unsigned int size, align;
diff --git a/mm/slub.c b/mm/slub.c
index b7e2cd5d82d..c26d4c36fba 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2715,7 +2715,6 @@ size_t ksize(const void *object)
 	 */
 	return s->size;
 }
-EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
--
cgit v1.2.3


From 5595cffc8248e4672c5803547445e85e4053c8fc Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Tue, 5 Aug 2008 09:28:47 +0300
Subject: SLUB: dynamic per-cache MIN_PARTIAL

This patch changes the static MIN_PARTIAL to a dynamic per-cache
->min_partial value that is calculated from object size. The bigger the
object size, the more pages we keep on the partial list.

I tested SLAB, SLUB, and SLUB with this patch on Jens Axboe's 'netio'
example script of the fio benchmarking tool. The script stresses the
networking subsystem which should also give a fairly good beating of
kmalloc() et al.

To run the test yourself, first clone the fio repository:

  git clone git://git.kernel.dk/fio.git

and then run the following command n times on your machine:

  time ./fio examples/netio

The results on my 2-way 64-bit x86 machine are as follows:

  [ the minimum, maximum, and average are captured from 50 individual runs ]

  real time (seconds)
                   min      max      avg      sd
  SLAB            22.76    23.38    22.98    0.17
  SLUB            22.80    25.78    23.46    0.72
  SLUB (dynamic)  22.74    23.54    23.00    0.20

  sys time (seconds)
                   min      max      avg      sd
  SLAB             6.90     8.28     7.70    0.28
  SLUB             7.42    16.95     8.89    2.28
  SLUB (dynamic)   7.17     8.64     7.73    0.29

  user time (seconds)
                   min      max      avg      sd
  SLAB            36.89    38.11    37.50    0.29
  SLUB            30.85    37.99    37.06    1.67
  SLUB (dynamic)  36.75    38.07    37.59    0.32

As you can see from the above numbers, this patch brings SLUB to the same
level as SLAB for this particular workload, fixing a ~2% regression. I'd
expect this change to help similar workloads that allocate a lot of
objects that are close to the size of a page.
Cc: Matthew Wilcox
Cc: Andrew Morton
Acked-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 include/linux/slub_def.h |  1 +
 mm/slub.c                | 26 +++++++++++++++++++-------
 2 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5bad61a93f6..2f5c16b1aac 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -46,6 +46,7 @@ struct kmem_cache_cpu {
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
+	unsigned long min_partial;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t nr_slabs;
diff --git a/mm/slub.c b/mm/slub.c
index c26d4c36fba..4f5b9614945 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1329,7 +1329,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		n = get_node(s, zone_to_nid(zone));
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-				n->nr_partial > MIN_PARTIAL) {
+				n->nr_partial > n->min_partial) {
 			page = get_partial_node(n);
 			if (page)
 				return page;
@@ -1381,7 +1381,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		slab_unlock(page);
 	} else {
 		stat(c, DEACTIVATE_EMPTY);
-		if (n->nr_partial < MIN_PARTIAL) {
+		if (n->nr_partial < n->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
@@ -1913,9 +1913,21 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 #endif
 }
 
-static void init_kmem_cache_node(struct kmem_cache_node *n)
+static void
+init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
 	n->nr_partial = 0;
+
+	/*
+	 * The larger the object size is, the more pages we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	n->min_partial = ilog2(s->size);
+	if (n->min_partial < MIN_PARTIAL)
+		n->min_partial = MIN_PARTIAL;
+	else if (n->min_partial > MAX_PARTIAL)
+		n->min_partial = MAX_PARTIAL;
+
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
@@ -2087,7 +2099,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	init_object(kmalloc_caches, n, 1);
 	init_tracking(kmalloc_caches, n);
 #endif
-	init_kmem_cache_node(n);
+	init_kmem_cache_node(n, kmalloc_caches);
 	inc_slabs_node(kmalloc_caches, node, page->objects);
 
 	/*
@@ -2144,7 +2156,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 
 		}
 		s->node[node] = n;
-		init_kmem_cache_node(n);
+		init_kmem_cache_node(n, s);
 	}
 	return 1;
 }
@@ -2155,7 +2167,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 
 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
-	init_kmem_cache_node(&s->local_node);
+	init_kmem_cache_node(&s->local_node, s);
 	return 1;
 }
 #endif
@@ -2889,7 +2901,7 @@ static int slab_mem_going_online_callback(void *arg)
 			ret = -ENOMEM;
 			goto out;
 		}
-		init_kmem_cache_node(n);
+		init_kmem_cache_node(n, s);
 		s->node[nid] = n;
 	}
 out:
--
cgit v1.2.3
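
For reference, the sizing rule added to init_kmem_cache_node() in the patch
above can be exercised on its own. What follows is a minimal user-space
sketch, not the kernel code itself: the MIN_PARTIAL and MAX_PARTIAL bounds of
5 and 10 are assumptions about mm/slub.c of this era, ilog2() is reimplemented
because the kernel helper is not available outside the tree, and the
calc_min_partial() helper name is made up for the example.

/* Sketch of the per-cache ->min_partial sizing rule introduced above. */
#include <stdio.h>

#define MIN_PARTIAL 5	/* assumed lower bound, as in mm/slub.c */
#define MAX_PARTIAL 10	/* assumed upper bound, as in mm/slub.c */

/* floor(log2(size)); stands in for the kernel's ilog2() for non-zero input */
static unsigned long ilog2(unsigned long size)
{
	unsigned long log = 0;

	while (size >>= 1)
		log++;
	return log;
}

/* The bigger the object, the more pages we keep on the partial list. */
static unsigned long calc_min_partial(unsigned long object_size)
{
	unsigned long min_partial = ilog2(object_size);

	if (min_partial < MIN_PARTIAL)
		min_partial = MIN_PARTIAL;
	else if (min_partial > MAX_PARTIAL)
		min_partial = MAX_PARTIAL;
	return min_partial;
}

int main(void)
{
	unsigned long sizes[] = { 8, 64, 256, 1024, 4096 };
	unsigned int i;

	/* e.g. 8-byte objects clamp up to 5, 4096-byte objects clamp down to 10 */
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("object size %5lu -> min_partial %lu\n",
		       sizes[i], calc_min_partial(sizes[i]));
	return 0;
}

The clamping keeps small caches at the old static behaviour while letting
caches with page-sized objects hold more partial slabs, which is where the
benchmark above showed SLUB falling behind SLAB.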