From 6cb8f91320d3e720351c21741da795fed580b21b Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Tue, 17 Jul 2007 04:03:22 -0700
Subject: Slab allocators: consistent ZERO_SIZE_PTR support and NULL result semantics

Define a ZERO_OR_NULL_PTR macro to be able to remove the checks from the
allocators.  Move the ZERO_SIZE_PTR related definitions into slab.h.

Make ZERO_SIZE_PTR work for all slab allocators and get rid of the
WARN_ON_ONCE(size == 0) that still remains in SLAB.

Make slub return NULL like the other allocators if a too-large memory
segment is requested via __kmalloc.

Signed-off-by: Christoph Lameter
Acked-by: Pekka Enberg
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/slab.h     |   13 +++++++++++++
 include/linux/slab_def.h |   12 ++++++++++++
 include/linux/slub_def.h |   12 ------------
 3 files changed, 25 insertions(+), 12 deletions(-)

(limited to 'include')

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 27402fea9b7..0289ec89300 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -30,6 +30,19 @@
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
 
+/*
+ * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
+ *
+ * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
+ *
+ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+ * Both make kfree a no-op.
+ */
+#define ZERO_SIZE_PTR ((void *)16)
+
+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) < \
+				(unsigned long)ZERO_SIZE_PTR)
+
 /*
  * struct kmem_cache related prototypes
  */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 365d036c454..16e814ffab8 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -32,6 +32,10 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
@@ -58,6 +62,10 @@ static inline void *kzalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
@@ -88,6 +96,10 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size)) {
 		int i = 0;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 #define CACHE(x) \
 		if (size <= x) \
 			goto found; \
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index a582f677152..579b0a22858 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -159,18 +159,6 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
 #define SLUB_DMA 0
 #endif
 
-
-/*
- * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
- *
- * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
- *
- * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
- * Both make kfree a no-op.
- */
-#define ZERO_SIZE_PTR ((void *)16)
-
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
--
cgit v1.2.3
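
The convention introduced above boils down to this: any pointer numerically
below ZERO_SIZE_PTR (i.e. NULL or ZERO_SIZE_PTR itself) is not a real
allocation, dereferencing ZERO_SIZE_PTR faults immediately, and kfree() must
treat both values as no-ops.  What follows is a minimal userspace sketch of
that convention, not part of the patch: my_kmalloc()/my_kfree() are
hypothetical stand-ins for the kernel allocators, while the two macros mirror
the include/linux/slab.h hunk above.

#include <stdio.h>
#include <stdlib.h>

#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) < \
				(unsigned long)ZERO_SIZE_PTR)

/* Hypothetical stand-in for kmalloc(): a zero-sized request gets
 * ZERO_SIZE_PTR instead of a real allocation; failure returns NULL. */
static void *my_kmalloc(size_t size)
{
	if (!size)
		return ZERO_SIZE_PTR;
	return malloc(size);
}

/* Hypothetical stand-in for kfree(): both NULL and ZERO_SIZE_PTR
 * are no-ops, only real pointers are freed. */
static void my_kfree(void *p)
{
	if (ZERO_OR_NULL_PTR(p))
		return;
	free(p);
}

int main(void)
{
	void *p = my_kmalloc(0);

	printf("zero-sized request -> %p (no-op for my_kfree: %d)\n",
	       p, ZERO_OR_NULL_PTR(p));
	my_kfree(p);	/* no-op, like kfree(ZERO_SIZE_PTR) */
	my_kfree(NULL);	/* no-op, like kfree(NULL) */
	return 0;
}

Dereferencing the pointer returned for a zero-sized request lands in the
unmapped first page, which is what makes ZERO_SIZE_PTR produce a distinct
access fault rather than silently reading someone else's data.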