-rw-r--r--	include/linux/vmalloc.h	 3
-rw-r--r--	mm/vmalloc.c	26
2 files changed, 15 insertions, 14 deletions
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index dc9a29d84ab..924e502905d 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -23,13 +23,14 @@ struct vm_area_struct;
 #endif
 
 struct vm_struct {
+	/* keep next,addr,size together to speedup lookups */
+	struct vm_struct	*next;
 	void			*addr;
 	unsigned long		size;
 	unsigned long		flags;
 	struct page		**pages;
 	unsigned int		nr_pages;
 	unsigned long		phys_addr;
-	struct vm_struct	*next;
 };
 
 /*
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 46606c133e8..7dc6aa74516 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -186,10 +186,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long fl
 	if (unlikely(!area))
 		return NULL;
 
-	if (unlikely(!size)) {
-		kfree (area);
+	if (unlikely(!size))
 		return NULL;
-	}
 
 	/*
 	 * We always allocate a guard page.
@@ -532,11 +530,12 @@ void *vmalloc_user(unsigned long size)
 	void *ret;
 
 	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(vmalloc_user);
@@ -605,11 +604,12 @@ void *vmalloc_32_user(unsigned long size)
 	void *ret;
 
 	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(vmalloc_32_user);
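
Note: the vmalloc_user()/vmalloc_32_user() hunks guard against __vmalloc() returning NULL under memory pressure; without the `if (ret)` check, __find_vm_area(NULL) is called and `area->flags` dereferences a NULL pointer when the lookup misses. The following is a minimal user-space sketch of the same "tag the bookkeeping only on success" pattern, not kernel code; alloc_region(), alloc_user_region() and region_flags are illustrative stand-ins, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_rwlock_t region_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long region_flags;	/* stands in for area->flags */

/* Illustrative stand-in for __vmalloc(): may fail and return NULL. */
static void *alloc_region(size_t size)
{
	return malloc(size);
}

/* Mirrors the patched vmalloc_user(): only take the lock and update
 * the bookkeeping state when the allocation actually succeeded. */
static void *alloc_user_region(size_t size)
{
	void *ret = alloc_region(size);

	if (ret) {			/* the guard added by the patch */
		pthread_rwlock_wrlock(&region_lock);
		region_flags |= 0x1;	/* stands in for VM_USERMAP */
		pthread_rwlock_unlock(&region_lock);
	}
	return ret;			/* NULL is simply propagated to the caller */
}

int main(void)
{
	void *p = alloc_user_region(4096);

	printf("allocation %s\n", p ? "succeeded" : "failed");
	free(p);
	return 0;
}

Keeping the lock inside the success branch also means a failed allocation never touches the shared list, which is exactly what the patched kernel paths do.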