Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c |  2
-rw-r--r--  mm/mmap.c    | 59
-rw-r--r--  mm/rmap.c    |  6
-rw-r--r--  mm/vmalloc.c | 33
4 files changed, 55 insertions, 45 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 47263ac3e4e..1d33fec7bac 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1004,7 +1004,7 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
         if (pos < size) {
                 retval = generic_file_direct_IO(READ, iocb,
                                         iov, pos, nr_segs);
-                if (retval >= 0 && !is_sync_kiocb(iocb))
+                if (retval > 0 && !is_sync_kiocb(iocb))
                         retval = -EIOCBQUEUED;
                 if (retval > 0)
                         *ppos = pos + retval;
diff --git a/mm/mmap.c b/mm/mmap.c
index 63df2d69841..de54acd9942 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1302,37 +1302,40 @@ unsigned long
 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                 unsigned long pgoff, unsigned long flags)
 {
-        if (flags & MAP_FIXED) {
-                unsigned long ret;
+        unsigned long ret;
 
-                if (addr > TASK_SIZE - len)
-                        return -ENOMEM;
-                if (addr & ~PAGE_MASK)
-                        return -EINVAL;
-                if (file && is_file_hugepages(file)) {
-                        /*
-                         * Check if the given range is hugepage aligned, and
-                         * can be made suitable for hugepages.
-                         */
-                        ret = prepare_hugepage_range(addr, len);
-                } else {
-                        /*
-                         * Ensure that a normal request is not falling in a
-                         * reserved hugepage range. For some archs like IA-64,
-                         * there is a separate region for hugepages.
-                         */
-                        ret = is_hugepage_only_range(current->mm, addr, len);
-                }
-                if (ret)
-                        return -EINVAL;
-                return addr;
-        }
+        if (!(flags & MAP_FIXED)) {
+                unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
-        if (file && file->f_op && file->f_op->get_unmapped_area)
-                return file->f_op->get_unmapped_area(file, addr, len,
-                                                pgoff, flags);
+                get_area = current->mm->get_unmapped_area;
+                if (file && file->f_op && file->f_op->get_unmapped_area)
+                        get_area = file->f_op->get_unmapped_area;
+                addr = get_area(file, addr, len, pgoff, flags);
+                if (IS_ERR_VALUE(addr))
+                        return addr;
+        }
 
-        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+        if (addr > TASK_SIZE - len)
+                return -ENOMEM;
+        if (addr & ~PAGE_MASK)
+                return -EINVAL;
+        if (file && is_file_hugepages(file)) {
+                /*
+                 * Check if the given range is hugepage aligned, and
+                 * can be made suitable for hugepages.
+                 */
+                ret = prepare_hugepage_range(addr, len);
+        } else {
+                /*
+                 * Ensure that a normal request is not falling in a
+                 * reserved hugepage range. For some archs like IA-64,
+                 * there is a separate region for hugepages.
+                 */
+                ret = is_hugepage_only_range(current->mm, addr, len);
+        }
+        if (ret)
+                return -EINVAL;
+        return addr;
 }
 EXPORT_SYMBOL(get_unmapped_area);
 
diff --git a/mm/rmap.c b/mm/rmap.c
index a6203b4e127..9827409eb7c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -626,7 +626,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
         pgd_t *pgd;
         pud_t *pud;
         pmd_t *pmd;
-        pte_t *pte;
+        pte_t *pte, *original_pte;
         pte_t pteval;
         struct page *page;
         unsigned long address;
@@ -658,7 +658,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
         if (!pmd_present(*pmd))
                 goto out_unlock;
 
-        for (pte = pte_offset_map(pmd, address);
+        for (original_pte = pte = pte_offset_map(pmd, address);
                         address < end; pte++, address += PAGE_SIZE) {
 
                 if (!pte_present(*pte))
@@ -694,7 +694,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
                 (*mapcount)--;
         }
 
-        pte_unmap(pte);
+        pte_unmap(original_pte);
 out_unlock:
         spin_unlock(&mm->page_table_lock);
 }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2bd83e5c2bb..8ff16a1eee6 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -248,31 +248,20 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
         return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
 }
 
-/**
- * remove_vm_area - find and remove a contingous kernel virtual area
- *
- * @addr: base address
- *
- * Search for the kernel VM area starting at @addr, and remove it.
- * This function returns the found VM area, but using it is NOT safe
- * on SMP machines.
- */
-struct vm_struct *remove_vm_area(void *addr)
+/* Caller must hold vmlist_lock */
+struct vm_struct *__remove_vm_area(void *addr)
 {
         struct vm_struct **p, *tmp;
 
-        write_lock(&vmlist_lock);
         for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
                 if (tmp->addr == addr)
                         goto found;
         }
-        write_unlock(&vmlist_lock);
         return NULL;
 
 found:
         unmap_vm_area(tmp);
         *p = tmp->next;
-        write_unlock(&vmlist_lock);
 
         /*
          * Remove the guard page.
@@ -281,6 +270,24 @@ found:
         return tmp;
 }
 
+/**
+ * remove_vm_area - find and remove a contingous kernel virtual area
+ *
+ * @addr: base address
+ *
+ * Search for the kernel VM area starting at @addr, and remove it.
+ * This function returns the found VM area, but using it is NOT safe
+ * on SMP machines, except for its size or flags.
+ */
+struct vm_struct *remove_vm_area(void *addr)
+{
+        struct vm_struct *v;
+        write_lock(&vmlist_lock);
+        v = __remove_vm_area(addr);
+        write_unlock(&vmlist_lock);
+        return v;
+}
+
 void __vunmap(void *addr, int deallocate_pages)
 {
         struct vm_struct *area;
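The mm/vmalloc.c hunks split the old remove_vm_area() into __remove_vm_area(), which per its new comment expects the caller to already hold vmlist_lock, and a remove_vm_area() wrapper that takes and drops the lock around it; since this diffstat is limited to 'mm', the caller that wants the lock-held variant presumably lives elsewhere in the full commit. Below is a minimal user-space sketch of that locked/unlocked helper pattern, for illustration only: it is not kernel code, and the names (struct node, list_lock, list_head, __list_remove, list_remove) and the pthread rwlock are stand-ins chosen for the example.

/* Illustration of the __helper()/helper() locking split -- not kernel code. */
/* Build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        void *addr;
        struct node *next;
};

static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct node *list_head;

/* Caller must hold list_lock for writing (mirrors __remove_vm_area). */
static struct node *__list_remove(void *addr)
{
        struct node **p, *tmp;

        for (p = &list_head; (tmp = *p) != NULL; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;         /* unlink under the caller's lock */
                        return tmp;
                }
        }
        return NULL;
}

/* Self-locking wrapper (mirrors remove_vm_area). */
static struct node *list_remove(void *addr)
{
        struct node *n;

        pthread_rwlock_wrlock(&list_lock);
        n = __list_remove(addr);
        pthread_rwlock_unlock(&list_lock);
        return n;
}

int main(void)
{
        int key;
        struct node *n = malloc(sizeof(*n));

        n->addr = &key;

        pthread_rwlock_wrlock(&list_lock);
        n->next = list_head;
        list_head = n;                          /* insert under the write lock */
        pthread_rwlock_unlock(&list_lock);

        printf("removed %p, expected %p\n", (void *)list_remove(&key), (void *)n);
        free(n);
        return 0;
}

The point of such a split is that a path which already holds the lock can call the double-underscore helper directly without deadlocking or re-taking the lock, while every other caller keeps the safe, self-locking entry point.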