Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c      9
-rw-r--r--  mm/mempolicy.c   2
-rw-r--r--  mm/slab.c        3
-rw-r--r--  mm/slub.c      175
-rw-r--r--  mm/swapfile.c    2
5 files changed, 73 insertions, 118 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 717aa0e3be2..ce3c9e4492d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2711,6 +2711,13 @@ void print_vma_addr(char *prefix, unsigned long ip)
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
+ /*
+ * Do not print if we are in atomic
+ * contexts (in exception stacks, etc.):
+ */
+ if (preempt_count())
+ return;
+
down_read(&mm->mmap_sem);
vma = find_vma(mm, ip);
if (vma && vma->vm_file) {
@@ -2719,7 +2726,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
if (buf) {
char *p, *s;
- p = d_path(f->f_dentry, f->f_vfsmnt, buf, PAGE_SIZE);
+ p = d_path(&f->f_path, buf, PAGE_SIZE);
if (IS_ERR(p))
p = "?";
s = strrchr(p, '/');
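
The memory.c change above follows the VFS conversion from passing a (dentry, vfsmnt) pair to passing a single struct path, and it also bails out of print_vma_addr() when preempt_count() is non-zero, since down_read(&mm->mmap_sem) may sleep and the function can be reached from atomic contexts such as exception stacks. As a minimal sketch of the new d_path() calling convention (illustration only, not part of the patch; the helper name and GFP flags are assumptions):

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/* Sketch: print the pathname of an open file via the new d_path() API. */
static void print_file_path(struct file *f)
{
	char *buf = (char *)__get_free_page(GFP_KERNEL);
	char *p;

	if (!buf)
		return;

	/* Old API: d_path(f->f_dentry, f->f_vfsmnt, buf, PAGE_SIZE); */
	p = d_path(&f->f_path, buf, PAGE_SIZE);
	if (IS_ERR(p))
		p = "?";
	printk(KERN_INFO "file: %s\n", p);

	free_page((unsigned long)buf);
}
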
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8d246c3b340..6c7ba1a63d2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1996,7 +1996,7 @@ int show_numa_map(struct seq_file *m, void *v)
if (file) {
seq_printf(m, " file=");
- seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n\t= ");
+ seq_path(m, &file->f_path, "\n\t= ");
} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
seq_printf(m, " heap");
} else if (vma->vm_start <= mm->start_stack &&
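
The mempolicy.c hunk is part of the same struct path conversion: seq_path() now takes a struct path pointer instead of separate vfsmount and dentry arguments. A short sketch of the new signature in a seq_file ->show()-style helper (the helper name is illustrative, not from the patch):

#include <linux/fs.h>
#include <linux/seq_file.h>

/* Sketch: print the backing file of a mapping with the new seq_path(). */
static void show_map_file(struct seq_file *m, struct file *file)
{
	seq_printf(m, " file=");
	/* Old API: seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n\t= "); */
	seq_path(m, &file->f_path, "\n\t= ");
}
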
diff --git a/mm/slab.c b/mm/slab.c
index 40c00dacbe4..473e6c2eaef 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2630,6 +2630,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
slabp->colouroff = colour_off;
slabp->s_mem = objp + colour_off;
slabp->nodeid = nodeid;
+ slabp->free = 0;
return slabp;
}
@@ -2683,7 +2684,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
slab_bufctl(slabp)[i] = i + 1;
}
slab_bufctl(slabp)[i - 1] = BUFCTL_END;
- slabp->free = 0;
}
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
@@ -2816,7 +2816,6 @@ static int cache_grow(struct kmem_cache *cachep,
if (!slabp)
goto opps1;
- slabp->nodeid = nodeid;
slab_map_pages(cachep, slabp, objp);
cache_init_objs(cachep, slabp);
diff --git a/mm/slub.c b/mm/slub.c
index e2989ae243b..74c65af0a54 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -149,13 +149,6 @@ static inline void ClearSlabDebug(struct page *page)
/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST
-/*
- * Currently fastpath is not supported if preemption is enabled.
- */
-#if defined(CONFIG_FAST_CMPXCHG_LOCAL) && !defined(CONFIG_PREEMPT)
-#define SLUB_FASTPATH
-#endif
-
#if PAGE_SHIFT <= 12
/*
@@ -211,6 +204,8 @@ static inline void ClearSlabDebug(struct page *page)
/* Internal SLUB flags */
#define __OBJECT_POISON 0x80000000 /* Poison object */
#define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */
+#define __KMALLOC_CACHE 0x20000000 /* objects freed using kfree */
+#define __PAGE_ALLOC_FALLBACK 0x10000000 /* Allow fallback to page alloc */
/* Not all arches define cache_line_size */
#ifndef cache_line_size
@@ -308,7 +303,7 @@ static inline int is_end(void *addr)
return (unsigned long)addr & PAGE_MAPPING_ANON;
}
-void *slab_address(struct page *page)
+static void *slab_address(struct page *page)
{
return page->end - PAGE_MAPPING_ANON;
}
@@ -1078,14 +1073,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
struct page *page;
int pages = 1 << s->order;
- if (s->order)
- flags |= __GFP_COMP;
-
- if (s->flags & SLAB_CACHE_DMA)
- flags |= SLUB_DMA;
-
- if (s->flags & SLAB_RECLAIM_ACCOUNT)
- flags |= __GFP_RECLAIMABLE;
+ flags |= s->allocflags;
if (node == -1)
page = alloc_pages(flags, s->order);
@@ -1519,11 +1507,7 @@ static void *__slab_alloc(struct kmem_cache *s,
{
void **object;
struct page *new;
-#ifdef SLUB_FASTPATH
- unsigned long flags;
- local_irq_save(flags);
-#endif
if (!c->page)
goto new_slab;
@@ -1546,10 +1530,6 @@ load_freelist:
unlock_out:
slab_unlock(c->page);
stat(c, ALLOC_SLOWPATH);
-out:
-#ifdef SLUB_FASTPATH
- local_irq_restore(flags);
-#endif
return object;
another_slab:
@@ -1581,8 +1561,22 @@ new_slab:
c->page = new;
goto load_freelist;
}
- object = NULL;
- goto out;
+
+ /*
+ * No memory available.
+ *
+ * If the slab uses higher order allocs but the object is
+ * smaller than a page size then we can fall back in emergencies
+ * to the page allocator via kmalloc_large. The page allocator may
+ * have failed to obtain a higher order page and we can try to
+ * allocate a single page if the object fits into a single page.
+ * That is only possible if certain conditions are met that are being
+ * checked when a slab is created.
+ */
+ if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
+ return kmalloc_large(s->objsize, gfpflags);
+
+ return NULL;
debug:
object = c->page->freelist;
if (!alloc_debug_processing(s, c->page, object, addr))
@@ -1609,34 +1603,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
{
void **object;
struct kmem_cache_cpu *c;
-
-/*
- * The SLUB_FASTPATH path is provisional and is currently disabled if the
- * kernel is compiled with preemption or if the arch does not support
- * fast cmpxchg operations. There are a couple of coming changes that will
- * simplify matters and allow preemption. Ultimately we may end up making
- * SLUB_FASTPATH the default.
- *
- * 1. The introduction of the per cpu allocator will avoid array lookups
- * through get_cpu_slab(). A special register can be used instead.
- *
- * 2. The introduction of per cpu atomic operations (cpu_ops) means that
- * we can realize the logic here entirely with per cpu atomics. The
- * per cpu atomic ops will take care of the preemption issues.
- */
-
-#ifdef SLUB_FASTPATH
- c = get_cpu_slab(s, raw_smp_processor_id());
- do {
- object = c->freelist;
- if (unlikely(is_end(object) || !node_match(c, node))) {
- object = __slab_alloc(s, gfpflags, node, addr, c);
- break;
- }
- stat(c, ALLOC_FASTPATH);
- } while (cmpxchg_local(&c->freelist, object, object[c->offset])
- != object);
-#else
unsigned long flags;
local_irq_save(flags);
@@ -1651,7 +1617,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
stat(c, ALLOC_FASTPATH);
}
local_irq_restore(flags);
-#endif
if (unlikely((gfpflags & __GFP_ZERO) && object))
memset(object, 0, c->objsize);
@@ -1688,11 +1653,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
void **object = (void *)x;
struct kmem_cache_cpu *c;
-#ifdef SLUB_FASTPATH
- unsigned long flags;
-
- local_irq_save(flags);
-#endif
c = get_cpu_slab(s, raw_smp_processor_id());
stat(c, FREE_SLOWPATH);
slab_lock(page);
@@ -1724,9 +1684,6 @@ checks_ok:
out_unlock:
slab_unlock(page);
-#ifdef SLUB_FASTPATH
- local_irq_restore(flags);
-#endif
return;
slab_empty:
@@ -1739,9 +1696,6 @@ slab_empty:
}
slab_unlock(page);
stat(c, FREE_SLAB);
-#ifdef SLUB_FASTPATH
- local_irq_restore(flags);
-#endif
discard_slab(s, page);
return;
@@ -1767,34 +1721,6 @@ static __always_inline void slab_free(struct kmem_cache *s,
{
void **object = (void *)x;
struct kmem_cache_cpu *c;
-
-#ifdef SLUB_FASTPATH
- void **freelist;
-
- c = get_cpu_slab(s, raw_smp_processor_id());
- debug_check_no_locks_freed(object, s->objsize);
- do {
- freelist = c->freelist;
- barrier();
- /*
- * If the compiler would reorder the retrieval of c->page to
- * come before c->freelist then an interrupt could
- * change the cpu slab before we retrieve c->freelist. We
- * could be matching on a page no longer active and put the
- * object onto the freelist of the wrong slab.
- *
- * On the other hand: If we already have the freelist pointer
- * then any change of cpu_slab will cause the cmpxchg to fail
- * since the freelist pointers are unique per slab.
- */
- if (unlikely(page != c->page || c->node < 0)) {
- __slab_free(s, page, x, addr, c->offset);
- break;
- }
- object[c->offset] = freelist;
- stat(c, FREE_FASTPATH);
- } while (cmpxchg_local(&c->freelist, freelist, object) != freelist);
-#else
unsigned long flags;
local_irq_save(flags);
@@ -1808,7 +1734,6 @@ static __always_inline void slab_free(struct kmem_cache *s,
__slab_free(s, page, x, addr, c->offset);
local_irq_restore(flags);
-#endif
}
void kmem_cache_free(struct kmem_cache *s, void *x)
@@ -2329,10 +2254,33 @@ static int calculate_sizes(struct kmem_cache *s)
size = ALIGN(size, align);
s->size = size;
- s->order = calculate_order(size);
+ if ((flags & __KMALLOC_CACHE) &&
+ PAGE_SIZE / size < slub_min_objects) {
+ /*
+ * Kmalloc cache that would not have enough objects in
+ * an order 0 page. Kmalloc slabs can fall back to
+ * page allocator order 0 allocs so take a reasonably large
+ * order that will allow us a good number of objects.
+ */
+ s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
+ s->flags |= __PAGE_ALLOC_FALLBACK;
+ s->allocflags |= __GFP_NOWARN;
+ } else
+ s->order = calculate_order(size);
+
if (s->order < 0)
return 0;
+ s->allocflags = 0;
+ if (s->order)
+ s->allocflags |= __GFP_COMP;
+
+ if (s->flags & SLAB_CACHE_DMA)
+ s->allocflags |= SLUB_DMA;
+
+ if (s->flags & SLAB_RECLAIM_ACCOUNT)
+ s->allocflags |= __GFP_RECLAIMABLE;
+
/*
* Determine the number of objects per slab
*/
@@ -2484,11 +2432,11 @@ EXPORT_SYMBOL(kmem_cache_destroy);
* Kmalloc subsystem
*******************************************************************/
-struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
EXPORT_SYMBOL(kmalloc_caches);
#ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
#endif
static int __init setup_slub_min_order(char *str)
@@ -2536,7 +2484,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
down_write(&slub_lock);
if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
- flags, NULL))
+ flags | __KMALLOC_CACHE, NULL))
goto panic;
list_add(&s->list, &slab_caches);
@@ -2670,9 +2618,8 @@ void *__kmalloc(size_t size, gfp_t flags)
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE / 2))
- return (void *)__get_free_pages(flags | __GFP_COMP,
- get_order(size));
+ if (unlikely(size > PAGE_SIZE))
+ return kmalloc_large(size, flags);
s = get_slab(size, flags);
@@ -2688,9 +2635,8 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE / 2))
- return (void *)__get_free_pages(flags | __GFP_COMP,
- get_order(size));
+ if (unlikely(size > PAGE_SIZE))
+ return kmalloc_large(size, flags);
s = get_slab(size, flags);
@@ -3001,7 +2947,7 @@ void __init kmem_cache_init(void)
caches++;
}
- for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
+ for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
create_kmalloc_cache(&kmalloc_caches[i],
"kmalloc", 1 << i, GFP_KERNEL);
caches++;
@@ -3028,7 +2974,7 @@ void __init kmem_cache_init(void)
slab_state = UP;
/* Provide the correct kmalloc names now that the caches are up */
- for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
+ for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
kmalloc_caches[i]. name =
kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
@@ -3057,6 +3003,9 @@ static int slab_unmergeable(struct kmem_cache *s)
if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
return 1;
+ if ((s->flags & __PAGE_ALLOC_FALLBACK))
+ return 1;
+
if (s->ctor)
return 1;
@@ -3218,9 +3167,9 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE / 2))
- return (void *)__get_free_pages(gfpflags | __GFP_COMP,
- get_order(size));
+ if (unlikely(size > PAGE_SIZE))
+ return kmalloc_large(size, gfpflags);
+
s = get_slab(size, gfpflags);
if (unlikely(ZERO_OR_NULL_PTR(s)))
@@ -3234,9 +3183,9 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE / 2))
- return (void *)__get_free_pages(gfpflags | __GFP_COMP,
- get_order(size));
+ if (unlikely(size > PAGE_SIZE))
+ return kmalloc_large(size, gfpflags);
+
s = get_slab(size, gfpflags);
if (unlikely(ZERO_OR_NULL_PTR(s)))
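
The slub.c changes above do two related things: every kmalloc() request larger than PAGE_SIZE now goes through kmalloc_large() instead of an open-coded __get_free_pages() call, and caches flagged __PAGE_ALLOC_FALLBACK may take the same path as an emergency fallback when a higher-order slab page cannot be obtained (unless the caller passed __GFP_NORETRY). Judging from the calls it replaces in this diff, kmalloc_large() simply hands the request to the page allocator as a compound allocation; a sketch of that inferred behavior (the _sketch name is mine, not a kernel symbol):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/types.h>

/*
 * Sketch of what kmalloc_large() is expected to do, inferred from the
 * __get_free_pages(flags | __GFP_COMP, get_order(size)) calls that this
 * patch replaces with kmalloc_large(size, flags).
 */
static inline void *kmalloc_large_sketch(size_t size, gfp_t flags)
{
	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
}

With PAGE_SIZE as the new cutoff, kmalloc_caches[] grows by one slot (PAGE_SHIFT + 1 entries) and the setup loops run up to and including PAGE_SHIFT, so an allocation of exactly one page is still served by a slab cache rather than the fallback path.
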
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 02ccab5ad9d..2da149cfc9a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1394,7 +1394,7 @@ static int swap_show(struct seq_file *swap, void *v)
}
file = ptr->swap_file;
- len = seq_path(swap, file->f_path.mnt, file->f_path.dentry, " \t\n\\");
+ len = seq_path(swap, &file->f_path, " \t\n\\");
seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
len < 40 ? 40 - len : 1, " ",
S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?