Diffstat (limited to 'mm')
-rw-r--r--	mm/Kconfig	4
-rw-r--r--	mm/mmap.c	5
-rw-r--r--	mm/mprotect.c	3
-rw-r--r--	mm/slab.c	2
4 files changed, 9 insertions, 5 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 4e9937ac352..391ffc54d13 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -29,7 +29,7 @@ config FLATMEM_MANUAL
 	  If unsure, choose this option (Flat Memory) over any other.
 
 config DISCONTIGMEM_MANUAL
-	bool "Discontigious Memory"
+	bool "Discontiguous Memory"
 	depends on ARCH_DISCONTIGMEM_ENABLE
 	help
 	  This option provides enhanced support for discontiguous
@@ -52,7 +52,7 @@ config SPARSEMEM_MANUAL
 	  memory hotplug systems.  This is normal.
 
 	  For many other systems, this will be an alternative to
-	  "Discontigious Memory".  This option provides some potential
+	  "Discontiguous Memory".  This option provides some potential
 	  performance benefits, along with decreased code complexity,
 	  but it is newer, and more experimental.
diff --git a/mm/mmap.c b/mm/mmap.c
index 12334aecf8a..fa11d91242e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1640,7 +1640,7 @@ static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 /*
  * Get rid of page table information in the indicated region.
  *
- * Called with the page table lock held.
+ * Called with the mm semaphore held.
  */
 static void unmap_region(struct mm_struct *mm,
 	struct vm_area_struct *vma, struct vm_area_struct *prev,
@@ -1993,6 +1993,9 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 	__vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
 	if (__vma && __vma->vm_start < vma->vm_end)
 		return -ENOMEM;
+	if ((vma->vm_flags & VM_ACCOUNT) &&
+	     security_vm_enough_memory(vma_pages(vma)))
+		return -ENOMEM;
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 	return 0;
 }
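
With this change, insert_vm_struct() charges a VM_ACCOUNT mapping against the overcommit limit (via security_vm_enough_memory(), sized by vma_pages()) before the VMA is linked, so an unbackable mapping is refused with -ENOMEM up front. A minimal user-space model of that charge-before-link pattern follows; enough_memory(), commit_limit and committed_pages are hypothetical stand-ins for the kernel's helper and its committed-VM counter, not the real API:

#include <stdio.h>

/* Hypothetical stand-ins: the real security_vm_enough_memory()
 * consults the overcommit policy and the committed-VM counter. */
static unsigned long committed_pages;
static unsigned long commit_limit = 1024;	/* pretend limit, in pages */

/* 0 on success, -1 on failure, mirroring the 0/-ENOMEM convention. */
static int enough_memory(unsigned long pages)
{
	if (committed_pages + pages > commit_limit)
		return -1;
	committed_pages += pages;	/* charge only on success */
	return 0;
}

int main(void)
{
	unsigned long vma_pages = 512;	/* size of the would-be mapping */

	/* The insert_vm_struct() shape: charge first, link only after
	 * the charge succeeds, otherwise bail out early. */
	if (enough_memory(vma_pages) != 0) {
		fprintf(stderr, "refused: would return -ENOMEM\n");
		return 1;
	}
	printf("charged %lu pages, %lu now committed\n",
	       vma_pages, committed_pages);
	return 0;
}
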
diff --git a/mm/mprotect.c b/mm/mprotect.c
index e9fbd013ad9..57577f63b30 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -248,7 +248,8 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
 
 		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
 
-		if ((newflags & ~(newflags >> 4)) & 0xf) {
+		/* newflags >> 4 shifts VM_MAY% into the place of VM_% */
+		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
 			error = -EACCES;
 			goto out;
 		}
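
The replacement test reads naturally once the vm_flags layout is in view: each VM_MAY* bit sits exactly four bits above its VM_* counterpart (VM_READ is 0x1, VM_MAYREAD is 0x10, and so on), so newflags >> 4 drops the "may" bits onto the request bits, and masking with VM_READ | VM_WRITE | VM_EXEC isolates any permission that was requested but is not allowed. A small stand-alone C demonstration of the trick, using the same flag values as the kernel headers:

#include <assert.h>
#include <stdio.h>

/* vm_flags bits: every VM_MAY* permission is its VM_* twin shifted
 * left by four, which is what makes the >> 4 trick work. */
#define VM_READ		0x00000001ul
#define VM_WRITE	0x00000002ul
#define VM_EXEC		0x00000004ul
#define VM_MAYREAD	0x00000010ul
#define VM_MAYWRITE	0x00000020ul
#define VM_MAYEXEC	0x00000040ul

int main(void)
{
	/* Ask for write on a mapping that may only ever be read:
	 * VM_WRITE is requested but VM_MAYWRITE is absent. */
	unsigned long newflags = VM_READ | VM_WRITE | VM_MAYREAD;

	/* ~(newflags >> 4) clears each request bit whose VM_MAY*
	 * twin is set; whatever survives the final mask is exactly
	 * the set of forbidden requests -- the -EACCES case. */
	unsigned long denied = (newflags & ~(newflags >> 4))
				& (VM_READ | VM_WRITE | VM_EXEC);

	assert(denied == VM_WRITE);
	printf("denied bits: %#lx\n", denied);
	return 0;
}
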
diff --git a/mm/slab.c b/mm/slab.c
index 9e876d6dfad..437d3388054 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -659,7 +659,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size,
 	 * kmem_cache_create(), or __kmalloc(), before
 	 * the generic caches are initialized.
 	 */
-	BUG_ON(csizep->cs_cachep == NULL);
+	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
 	while (size > csizep->cs_size)
 		csizep++;
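
For reference, __find_general_cachep() simply walks the malloc_sizes[] table until it hits the first entry big enough for the request; the BUG_ON now tests malloc_sizes[INDEX_AC].cs_cachep, the first general cache that kmem_cache_init() sets up, rather than dereferencing whatever entry csizep happens to start at. A toy user-space model of the lookup loop, with cs_cachep reduced to a name string and the sizes chosen arbitrarily for illustration:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Toy model of the kernel's malloc_sizes[] table: ascending sizes,
 * ended by a catch-all sentinel, one "cache" per size class. */
struct cache_sizes {
	size_t      cs_size;
	const char *cs_cachep;	/* stands in for the kmem_cache_t * */
};

static struct cache_sizes malloc_sizes[] = {
	{ 32,  "size-32"  },
	{ 64,  "size-64"  },
	{ 128, "size-128" },
	{ 256, "size-256" },
	{ (size_t)-1, NULL },	/* sentinel terminates the walk */
};

/* The same loop as __find_general_cachep(): advance until the
 * current size class can hold the request. */
static const char *find_general_cachep(size_t size)
{
	struct cache_sizes *csizep = malloc_sizes;

	while (size > csizep->cs_size)
		csizep++;
	return csizep->cs_cachep;
}

int main(void)
{
	assert(find_general_cachep(100) != NULL);
	printf("96-byte request -> %s\n", find_general_cachep(96));
	return 0;
}
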