Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	9
-rw-r--r--	mm/mempolicy.c	2
-rw-r--r--	mm/slab.c	3
-rw-r--r--	mm/slub.c	94
-rw-r--r--	mm/swapfile.c	2
5 files changed, 75 insertions, 35 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 717aa0e3be2..ce3c9e4492d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2711,6 +2711,13 @@ void print_vma_addr(char *prefix, unsigned long ip)
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 
+	/*
+	 * Do not print if we are in atomic
+	 * contexts (in exception stacks, etc.):
+	 */
+	if (preempt_count())
+		return;
+
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, ip);
 	if (vma && vma->vm_file) {
@@ -2719,7 +2726,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
 		if (buf) {
 			char *p, *s;
 
-			p = d_path(f->f_dentry, f->f_vfsmnt, buf, PAGE_SIZE);
+			p = d_path(&f->f_path, buf, PAGE_SIZE);
 			if (IS_ERR(p))
 				p = "?";
 			s = strrchr(p, '/');
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8d246c3b340..6c7ba1a63d2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1996,7 +1996,7 @@ int show_numa_map(struct seq_file *m, void *v)
 
 	if (file) {
 		seq_printf(m, " file=");
-		seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n\t= ");
+		seq_path(m, &file->f_path, "\n\t= ");
 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
 		seq_printf(m, " heap");
 	} else if (vma->vm_start <= mm->start_stack &&
diff --git a/mm/slab.c b/mm/slab.c
index 40c00dacbe4..473e6c2eaef 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2630,6 +2630,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 	slabp->colouroff = colour_off;
 	slabp->s_mem = objp + colour_off;
 	slabp->nodeid = nodeid;
+	slabp->free = 0;
 	return slabp;
 }
 
@@ -2683,7 +2684,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		slab_bufctl(slabp)[i] = i + 1;
 	}
 	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
-	slabp->free = 0;
 }
 
 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
@@ -2816,7 +2816,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	if (!slabp)
 		goto opps1;
 
-	slabp->nodeid = nodeid;
 	slab_map_pages(cachep, slabp, objp);
 
 	cache_init_objs(cachep, slabp);
diff --git a/mm/slub.c b/mm/slub.c
index e2989ae243b..4b3895cb90e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -211,6 +211,8 @@ static inline void ClearSlabDebug(struct page *page)
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
+#define __KMALLOC_CACHE		0x20000000 /* objects freed using kfree */
+#define __PAGE_ALLOC_FALLBACK	0x10000000 /* Allow fallback to page alloc */
 
 /* Not all arches define cache_line_size */
 #ifndef cache_line_size
@@ -308,7 +310,7 @@ static inline int is_end(void *addr)
 	return (unsigned long)addr & PAGE_MAPPING_ANON;
 }
 
-void *slab_address(struct page *page)
+static void *slab_address(struct page *page)
 {
 	return page->end - PAGE_MAPPING_ANON;
 }
@@ -1078,14 +1080,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct page *page;
 	int pages = 1 << s->order;
 
-	if (s->order)
-		flags |= __GFP_COMP;
-
-	if (s->flags & SLAB_CACHE_DMA)
-		flags |= SLUB_DMA;
-
-	if (s->flags & SLAB_RECLAIM_ACCOUNT)
-		flags |= __GFP_RECLAIMABLE;
+	flags |= s->allocflags;
 
 	if (node == -1)
 		page = alloc_pages(flags, s->order);
@@ -1546,7 +1541,6 @@ load_freelist:
 unlock_out:
 	slab_unlock(c->page);
 	stat(c, ALLOC_SLOWPATH);
-out:
 #ifdef SLUB_FASTPATH
 	local_irq_restore(flags);
 #endif
@@ -1581,8 +1575,24 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
-	object = NULL;
-	goto out;
+#ifdef SLUB_FASTPATH
+	local_irq_restore(flags);
+#endif
+	/*
+	 * No memory available.
+	 *
+	 * If the slab uses higher order allocs but the object is
+	 * smaller than a page size then we can fallback in emergencies
+	 * to the page allocator via kmalloc_large. The page allocator may
+	 * have failed to obtain a higher order page and we can try to
+	 * allocate a single page if the object fits into a single page.
+	 * That is only possible if certain conditions are met that are being
+	 * checked when a slab is created.
+	 */
+	if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
+		return kmalloc_large(s->objsize, gfpflags);
+
+	return NULL;
 debug:
 	object = c->page->freelist;
 	if (!alloc_debug_processing(s, c->page, object, addr))
@@ -2329,10 +2339,33 @@ static int calculate_sizes(struct kmem_cache *s)
 	size = ALIGN(size, align);
 	s->size = size;
 
-	s->order = calculate_order(size);
+	if ((flags & __KMALLOC_CACHE) &&
+			PAGE_SIZE / size < slub_min_objects) {
+		/*
+		 * Kmalloc cache that would not have enough objects in
+		 * an order 0 page. Kmalloc slabs can fallback to
+		 * page allocator order 0 allocs so take a reasonably large
+		 * order that will allows us a good number of objects.
+		 */
+		s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
+		s->flags |= __PAGE_ALLOC_FALLBACK;
+		s->allocflags |= __GFP_NOWARN;
+	} else
+		s->order = calculate_order(size);
+
 	if (s->order < 0)
 		return 0;
 
+	s->allocflags = 0;
+	if (s->order)
+		s->allocflags |= __GFP_COMP;
+
+	if (s->flags & SLAB_CACHE_DMA)
+		s->allocflags |= SLUB_DMA;
+
+	if (s->flags & SLAB_RECLAIM_ACCOUNT)
+		s->allocflags |= __GFP_RECLAIMABLE;
+
 	/*
 	 * Determine the number of objects per slab
 	 */
@@ -2484,11 +2517,11 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 *		Kmalloc subsystem
 *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -2536,7 +2569,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 
 	down_write(&slub_lock);
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-			flags, NULL))
+			flags | __KMALLOC_CACHE, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -2670,9 +2703,8 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-							get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
 
@@ -2688,9 +2720,8 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-							get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
 
@@ -3001,7 +3032,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3028,7 +3059,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3057,6 +3088,9 @@ static int slab_unmergeable(struct kmem_cache *s)
 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
 		return 1;
 
+	if ((s->flags & __PAGE_ALLOC_FALLBACK))
+		return 1;
+
 	if (s->ctor)
 		return 1;
 
@@ -3218,9 +3252,9 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-							get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, gfpflags);
+
 	s = get_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
@@ -3234,9 +3268,9 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-							get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, gfpflags);
+
 	s = get_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 02ccab5ad9d..2da149cfc9a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1394,7 +1394,7 @@ static int swap_show(struct seq_file *swap, void *v)
 	}
 
 	file = ptr->swap_file;
-	len = seq_path(swap, file->f_path.mnt, file->f_path.dentry, " \t\n\\");
+	len = seq_path(swap, &file->f_path, " \t\n\\");
 	seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
 			len < 40 ? 40 - len : 1, " ",
 			S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
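The mm/slub.c hunks above route kmalloc() requests larger than PAGE_SIZE straight to the page allocator via kmalloc_large(), and let caches marked __PAGE_ALLOC_FALLBACK fall back to it when a higher-order slab allocation fails. The userspace sketch below only models that dispatch logic; MODEL_PAGE_SIZE and the model_* helpers are illustrative stand-ins for this page, not kernel APIs.

/*
 * Userspace model (not kernel code) of the allocation policy in the
 * slub.c diff above: requests larger than one page bypass the slab
 * layer and go to the page allocator, and a cache allowed to fall
 * back may satisfy a small request from single pages when its slab
 * path cannot.  Names are invented for illustration only.
 */
#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE 4096UL

/* Stand-in for kmalloc_large(): hand out whole, page-aligned pages. */
static void *model_kmalloc_large(size_t size)
{
	size_t pages = (size + MODEL_PAGE_SIZE - 1) / MODEL_PAGE_SIZE;
	void *p = NULL;

	if (posix_memalign(&p, MODEL_PAGE_SIZE, pages * MODEL_PAGE_SIZE))
		return NULL;
	return p;
}

/* Stand-in for the slab fast path; NULL means "no slab page available". */
static void *model_slab_alloc(size_t size, int slab_exhausted)
{
	return slab_exhausted ? NULL : malloc(size);
}

/* Dispatch mirroring the new __kmalloc() shape: large sizes skip the slab,
 * and a fallback-enabled cache retries via the page-allocator path. */
static void *model_kmalloc(size_t size, int slab_exhausted, int allow_fallback)
{
	void *p;

	if (size > MODEL_PAGE_SIZE)
		return model_kmalloc_large(size);

	p = model_slab_alloc(size, slab_exhausted);
	if (!p && allow_fallback)
		p = model_kmalloc_large(size);	/* __PAGE_ALLOC_FALLBACK analogue */
	return p;
}

int main(void)
{
	void *a = model_kmalloc(100, 0, 1);	/* normal slab path */
	void *b = model_kmalloc(100, 1, 1);	/* slab exhausted, falls back */
	void *c = model_kmalloc(8192, 0, 0);	/* > page size, page allocator */

	printf("slab=%p fallback=%p large=%p\n", a, b, c);
	free(a);
	free(b);
	free(c);
	return 0;
}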