From 93fac7041f082297b93655a0e49f659cd7520e40 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Fri, 31 Mar 2006 02:29:56 -0800
Subject: [PATCH] mm: schedule find_trylock_page() removal

find_trylock_page() is an odd interface in that it doesn't take a
reference like the others.  Now that XFS no longer uses it, and its
last remaining caller actually wants an elevated refcount, opencode
that callsite and schedule find_trylock_page() for removal.

Signed-off-by: Nick Piggin
Acked-by: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/swapfile.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

(limited to 'mm')

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 39aa9d12961..e5fd5385f0c 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -397,18 +397,24 @@ void free_swap_and_cache(swp_entry_t entry)
 
 	p = swap_info_get(entry);
 	if (p) {
-		if (swap_entry_free(p, swp_offset(entry)) == 1)
-			page = find_trylock_page(&swapper_space, entry.val);
+		if (swap_entry_free(p, swp_offset(entry)) == 1) {
+			page = find_get_page(&swapper_space, entry.val);
+			if (page && unlikely(TestSetPageLocked(page))) {
+				page_cache_release(page);
+				page = NULL;
+			}
+		}
 		spin_unlock(&swap_lock);
 	}
 	if (page) {
 		int one_user;
 
 		BUG_ON(PagePrivate(page));
-		page_cache_get(page);
 		one_user = (page_count(page) == 2);
 		/* Only cache user (+us), or swap space full? Free it! */
-		if (!PageWriteback(page) && (one_user || vm_swap_full())) {
+		/* Also recheck PageSwapCache after page is locked (above) */
+		if (PageSwapCache(page) && !PageWriteback(page) &&
+				(one_user || vm_swap_full())) {
 			delete_from_swap_cache(page);
 			SetPageDirty(page);
 		}
--
cgit v1.2.3

From d6692183ac1d8f4a4e4015f9ce9acc2514618e0b Mon Sep 17 00:00:00 2001
From: "Chen, Kenneth W"
Date: Fri, 31 Mar 2006 02:29:57 -0800
Subject: [PATCH] fix extra page ref count in follow_hugetlb_page

git-commit: d5d4b0aa4e1430d73050babba999365593bdb9d2
"[PATCH] optimize follow_hugetlb_page" breaks mlock on hugepage areas.

I misinterpreted the "pages" argument and made get_page()
unconditional.  It should only take a reference when the "pages"
argument is non-NULL.

Credit goes to Adam Litke who spotted the bug.

Signed-off-by: Ken Chen
Acked-by: Adam Litke
Cc: David Gibson
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/hugetlb.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'mm')

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ebad6bbb350..d87885eb4ac 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -697,9 +697,10 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
 		page = pte_page(*pte);
 same_page:
-		get_page(page);
-		if (pages)
+		if (pages) {
+			get_page(page);
 			pages[i] = page + pfn_offset;
+		}
 
 		if (vmas)
 			vmas[i] = vma;
--
cgit v1.2.3

From 78c997a4be7d1ed3ff4c27f23d30a0185d39bcbf Mon Sep 17 00:00:00 2001
From: "Chen, Kenneth W"
Date: Fri, 31 Mar 2006 02:30:01 -0800
Subject: [PATCH] hugetlb: don't allow free hugetlb count fall below reserved count

With strict page reservation, I think the kernel should enforce that
the number of free hugetlb pages never falls below the reserved count.
Currently that is possible via the sysctl path.  Add a proper check in
the sysctl handler to disallow it.

Signed-off-by: Ken Chen
Cc: David Gibson
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/hugetlb.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'mm')

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d87885eb4ac..832f676ca03 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -334,6 +334,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
 		return nr_huge_pages;
 
 	spin_lock(&hugetlb_lock);
+	count = max(count, reserved_huge_pages);
 	try_to_free_low(count);
 	while (count < nr_huge_pages) {
 		struct page *page = dequeue_huge_page(NULL, 0);
--
cgit v1.2.3

From 9b41046cd0ee0a57f849d6e1363f7933e363cca9 Mon Sep 17 00:00:00 2001
From: OGAWA Hirofumi
Date: Fri, 31 Mar 2006 02:30:33 -0800
Subject: [PATCH] Don't pass boot parameters to argv_init[]

The boot cmdline is parsed in parse_early_param() and
parse_args(,unknown_bootoption).  And __setup() is used in
obsolete_checksetup().

	start_kernel()
		-> parse_args()
			-> unknown_bootoption()
				-> obsolete_checksetup()

If an __setup() callback (->setup_func()) returns 1 in
obsolete_checksetup(), obsolete_checksetup() considers the parameter
handled.  If ->setup_func() returns 0, obsolete_checksetup() tries the
other ->setup_func()s.  If all ->setup_func()s that matched the
parameter return 0, the parameter is added to argv_init[].  Then, when
running /sbin/init (or the app given by init=), argv_init[] is passed
to it.  If the app doesn't ignore those arguments, it may warn and
exit.

This patch fixes one such wrong usage; it only fixes the obvious case,
though.

Signed-off-by: OGAWA Hirofumi
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/memory.c b/mm/memory.c
index 8d8f52569f3..0ec7bc64427 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -87,7 +87,7 @@ int randomize_va_space __read_mostly = 1;
 static int __init disable_randmaps(char *s)
 {
 	randomize_va_space = 0;
-	return 0;
+	return 1;
 }
 __setup("norandmaps", disable_randmaps);
--
cgit v1.2.3
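For readers unfamiliar with the convention the fix above relies on,
here is a minimal sketch of an __setup() handler; "myopt" and
parse_myopt are hypothetical names, not code from the patch:

	/*
	 * Sketch of the __setup() return convention, using the
	 * hypothetical boot option "myopt=" (not part of the patch).
	 * Returning 1 tells obsolete_checksetup() the parameter was
	 * consumed; returning 0 lets it fall through, and a parameter
	 * nobody claims ends up in argv_init[] and gets passed to
	 * /sbin/init.
	 */
	static int __init parse_myopt(char *s)
	{
		/* act on the option value in s here */
		return 1;	/* handled: don't pass "myopt" to init */
	}
	__setup("myopt=", parse_myopt);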
From f79e2abb9bd452d97295f34376dedbec9686b986 Mon Sep 17 00:00:00 2001
From: Andrew Morton
Date: Fri, 31 Mar 2006 02:30:42 -0800
Subject: [PATCH] sys_sync_file_range()

Remove the recently-added LINUX_FADV_ASYNC_WRITE and
LINUX_FADV_WRITE_WAIT fadvise() additions, do it in a new
sys_sync_file_range() syscall instead.  Reasons:

- It's more flexible.  Things which would require two or three
  syscalls with fadvise() can be done in a single syscall.

- Using fadvise() in this manner is something not covered by POSIX.

The patch wires up the syscall for x86.

The syscall is implemented in the new fs/sync.c.  The intention is
that we can move sys_fsync(), sys_fdatasync() and perhaps sys_sync()
into there later.

Documentation for the syscall is in fs/sync.c.

A test app (sync_file_range.c) is in
http://www.zip.com.au/~akpm/linux/patches/stuff/ext3-tools.tar.gz.

The available-to-GPL-modules do_sync_file_range() is for knfsd: "A
COMMIT can say NFS_DATA_SYNC or NFS_FILE_SYNC.  I can skip the ->fsync
call for NFS_DATA_SYNC which is hopefully the more common."

Note: the `async' writeout mode SYNC_FILE_RANGE_WRITE will turn
synchronous if the queue is congested.  This is trivial to fix: add a
new flag bit, set wbc->nonblocking.  But I'm not sure that we want to
expose implementation details down to that level.

Note: it's notable that we can sync an fd which wasn't opened for
writing.  Same with fsync() and fdatasync().

Note: the code takes some care to handle attempts to sync file
contents outside the 16TB offset on 32-bit machines.  It makes such
attempts appear to succeed, for best 32-bit/64-bit compatibility.
Perhaps it should make such requests fail...

Cc: Nick Piggin
Cc: Michael Kerrisk
Cc: Ulrich Drepper
Cc: Neil Brown
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/fadvise.c | 20 --------------------
 1 file changed, 20 deletions(-)

(limited to 'mm')

diff --git a/mm/fadvise.c b/mm/fadvise.c
index 907c39257ca..0a03357a1f8 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -35,17 +35,6 @@
  *
  * LINUX_FADV_ASYNC_WRITE: push some or all of the dirty pages at the disk.
  *
- * LINUX_FADV_WRITE_WAIT, LINUX_FADV_ASYNC_WRITE: push all of the currently
- * dirty pages at the disk.
- *
- * LINUX_FADV_WRITE_WAIT, LINUX_FADV_ASYNC_WRITE, LINUX_FADV_WRITE_WAIT: push
- * all of the currently dirty pages at the disk, wait until they have been
- * written.
- *
- * It should be noted that none of these operations write out the file's
- * metadata.  So unless the application is strictly performing overwrites of
- * already-instantiated disk blocks, there are no guarantees here that the data
- * will be available after a crash.
  */
 asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 {
@@ -129,15 +118,6 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 		invalidate_mapping_pages(mapping, start_index,
 				end_index);
 		break;
-	case LINUX_FADV_ASYNC_WRITE:
-		ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
-				WB_SYNC_NONE);
-		break;
-	case LINUX_FADV_WRITE_WAIT:
-		ret = wait_on_page_writeback_range(mapping,
-				offset >> PAGE_CACHE_SHIFT,
-				endbyte >> PAGE_CACHE_SHIFT);
-		break;
 	default:
 		ret = -EINVAL;
 	}
--
cgit v1.2.3
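As a usage illustration, a caller of the new syscall might look like
the sketch below.  It invokes the raw syscall (no libc wrapper existed
when this went in), spells out the flag values the syscall defines,
and assumes a 64-bit ABI so each loff_t fits in a single syscall()
argument slot:

	#include <sys/syscall.h>
	#include <unistd.h>

	/* flag values as defined for the new syscall */
	#define SYNC_FILE_RANGE_WAIT_BEFORE	1
	#define SYNC_FILE_RANGE_WRITE		2
	#define SYNC_FILE_RANGE_WAIT_AFTER	4

	/*
	 * Write out and wait on [offset, offset+nbytes): roughly the
	 * removed WRITE_WAIT + ASYNC_WRITE + WRITE_WAIT fadvise()
	 * sequence, collapsed into a single syscall.
	 */
	static long sync_range(int fd, long long offset, long long nbytes)
	{
		return syscall(__NR_sync_file_range, fd, offset, nbytes,
				SYNC_FILE_RANGE_WAIT_BEFORE |
				SYNC_FILE_RANGE_WRITE |
				SYNC_FILE_RANGE_WAIT_AFTER);
	}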
From 46a350ef9803a2526a128c55cdb27dd73b2ad966 Mon Sep 17 00:00:00 2001
From: Eric Sesterhenn
Date: Sat, 1 Apr 2006 01:23:29 +0200
Subject: BUG_ON() Conversion in mm/mmap.c

This changes if () BUG(); constructs to BUG_ON(), which is cleaner,
contains unlikely() and can be better optimized away.

Signed-off-by: Eric Sesterhenn
Signed-off-by: Adrian Bunk
---
 mm/mmap.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

(limited to 'mm')

diff --git a/mm/mmap.c b/mm/mmap.c
index 4f5b5709136..e780d19aa21 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -294,8 +294,7 @@ void validate_mm(struct mm_struct *mm)
 	i = browse_rb(&mm->mm_rb);
 	if (i != mm->map_count)
 		printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
-	if (bug)
-		BUG();
+	BUG_ON(bug);
 }
 #else
 #define validate_mm(mm) do { } while (0)
@@ -432,8 +431,7 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 	struct rb_node ** rb_link, * rb_parent;
 
 	__vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
-	if (__vma && __vma->vm_start < vma->vm_end)
-		BUG();
+	BUG_ON(__vma && __vma->vm_start < vma->vm_end);
 	__vma_link(mm, vma, prev, rb_link, rb_parent);
 	mm->map_count++;
 }
@@ -813,8 +811,7 @@ try_prev:
 	 * (e.g. stash info in next's anon_vma_node when assigning
 	 * an anon_vma, or when trying vma_merge).  Another time.
 	 */
-	if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma)
-		BUG();
+	BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
 	if (!near)
 		goto none;
 
--
cgit v1.2.3
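The transformation is mechanical; restating one hunk from above
outside diff form:

	/* before: two statements, no branch-prediction hint */
	if (__vma && __vma->vm_start < vma->vm_end)
		BUG();

	/* after: a single statement; BUG_ON() wraps its condition in
	 * unlikely(), so the compiler can move the trap out of line */
	BUG_ON(__vma && __vma->vm_start < vma->vm_end);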
From e74ca2b49b8a38e9ba0bc039a00498c21140f1d6 Mon Sep 17 00:00:00 2001
From: Eric Sesterhenn
Date: Sat, 1 Apr 2006 01:25:12 +0200
Subject: BUG_ON() Conversion in mm/swap_state.c

This changes if () BUG(); constructs to BUG_ON(), which is cleaner,
contains unlikely() and can be better optimized away.

Signed-off-by: Eric Sesterhenn
Signed-off-by: Adrian Bunk
---
 mm/swap_state.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'mm')

diff --git a/mm/swap_state.c b/mm/swap_state.c
index d7af296833f..e0e1583f32c 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -148,8 +148,7 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
 	swp_entry_t entry;
 	int err;
 
-	if (!PageLocked(page))
-		BUG();
+	BUG_ON(!PageLocked(page));
 
 	for (;;) {
 		entry = get_swap_page();
--
cgit v1.2.3

From 5aae277ed67b0271235d3a50908bb48b0e59be26 Mon Sep 17 00:00:00 2001
From: Eric Sesterhenn
Date: Sat, 1 Apr 2006 01:26:09 +0200
Subject: BUG_ON() Conversion in mm/vmalloc.c

This changes if () BUG(); constructs to BUG_ON(), which is cleaner,
contains unlikely() and can be better optimized away.

Signed-off-by: Eric Sesterhenn
Signed-off-by: Adrian Bunk
---
 mm/vmalloc.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'mm')

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 729eb3eec75..c0504f1e34e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -321,8 +321,7 @@ void __vunmap(void *addr, int deallocate_pages)
 		int i;
 
 		for (i = 0; i < area->nr_pages; i++) {
-			if (unlikely(!area->pages[i]))
-				BUG();
+			BUG_ON(!area->pages[i]);
 			__free_page(area->pages[i]);
 		}
 
--
cgit v1.2.3

From 75babcacede876608f14ef1a20e795ce17ae637f Mon Sep 17 00:00:00 2001
From: Eric Sesterhenn
Date: Sun, 2 Apr 2006 13:47:35 +0200
Subject: BUG_ON() Conversion in mm/highmem.c

This changes if () BUG(); constructs to BUG_ON(), which is cleaner,
contains unlikely() and can be better optimized away.

Signed-off-by: Eric Sesterhenn
Signed-off-by: Adrian Bunk
---
 mm/highmem.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

(limited to 'mm')

diff --git a/mm/highmem.c b/mm/highmem.c
index 55885f64af4..9b274fdf9d0 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -74,8 +74,7 @@ static void flush_all_zero_pkmaps(void)
 		pkmap_count[i] = 0;
 
 		/* sanity check */
-		if (pte_none(pkmap_page_table[i]))
-			BUG();
+		BUG_ON(pte_none(pkmap_page_table[i]));
 
 		/*
 		 * Don't need an atomic fetch-and-clear op here;
@@ -158,8 +157,7 @@ void fastcall *kmap_high(struct page *page)
 	if (!vaddr)
 		vaddr = map_new_virtual(page);
 	pkmap_count[PKMAP_NR(vaddr)]++;
-	if (pkmap_count[PKMAP_NR(vaddr)] < 2)
-		BUG();
+	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
 	spin_unlock(&kmap_lock);
 	return (void*) vaddr;
 }
@@ -174,8 +172,7 @@ void fastcall kunmap_high(struct page *page)
 
 	spin_lock(&kmap_lock);
 	vaddr = (unsigned long)page_address(page);
-	if (!vaddr)
-		BUG();
+	BUG_ON(!vaddr);
 	nr = PKMAP_NR(vaddr);
 
 	/*
@@ -220,8 +217,7 @@ static __init int init_emergency_pool(void)
 		return 0;
 
 	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
-	if (!page_pool)
-		BUG();
+	BUG_ON(!page_pool);
 	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
 
 	return 0;
@@ -264,8 +260,7 @@ int init_emergency_isa_pool(void)
 
 	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
 				       mempool_free_pages, (void *) 0);
-	if (!isa_page_pool)
-		BUG();
+	BUG_ON(!isa_page_pool);
 
 	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
 	return 0;
--
cgit v1.2.3

From 40094fa65238291d51839326320aba997092ab1f Mon Sep 17 00:00:00 2001
From: Eric Sesterhenn
Date: Sun, 2 Apr 2006 13:49:25 +0200
Subject: BUG_ON() Conversion in mm/slab.c

This changes if () BUG(); constructs to BUG_ON(), which is cleaner,
contains unlikely() and can be better optimized away.

Signed-off-by: Eric Sesterhenn
Signed-off-by: Adrian Bunk
---
 mm/slab.c | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)

(limited to 'mm')

diff --git a/mm/slab.c b/mm/slab.c
index 4cbf8bb1355..f055c142021 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1297,8 +1297,7 @@ void __init kmem_cache_init(void)
 		if (cache_cache.num)
 			break;
 	}
-	if (!cache_cache.num)
-		BUG();
+	BUG_ON(!cache_cache.num);
 	cache_cache.gfporder = order;
 	cache_cache.colour = left_over / cache_cache.colour_off;
 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
@@ -1974,8 +1973,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * Always checks flags, a caller might be expecting debug support which
 	 * isn't available.
 	 */
-	if (flags & ~CREATE_MASK)
-		BUG();
+	BUG_ON(flags & ~CREATE_MASK);
 
 	/*
 	 * Check that size is in terms of words.  This is needed to avoid
@@ -2206,8 +2204,7 @@ static int __node_shrink(struct kmem_cache *cachep, int node)
 
 		slabp = list_entry(l3->slabs_free.prev, struct slab, list);
 #if DEBUG
-		if (slabp->inuse)
-			BUG();
+		BUG_ON(slabp->inuse);
 #endif
 		list_del(&slabp->list);
 
@@ -2248,8 +2245,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
  */
 int kmem_cache_shrink(struct kmem_cache *cachep)
 {
-	if (!cachep || in_interrupt())
-		BUG();
+	BUG_ON(!cachep || in_interrupt());
 
 	return __cache_shrink(cachep);
 }
@@ -2277,8 +2273,7 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
 	int i;
 	struct kmem_list3 *l3;
 
-	if (!cachep || in_interrupt())
-		BUG();
+	BUG_ON(!cachep || in_interrupt());
 
 	/* Don't let CPUs to come and go */
 	lock_cpu_hotplug();
@@ -2477,8 +2472,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
-		BUG();
+	BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
 	if (flags & SLAB_NO_GROW)
 		return 0;
--
cgit v1.2.3
From a580290c3e64bb695158a090d02d1232d9609311 Mon Sep 17 00:00:00 2001
From: Martin Waitz
Date: Sun, 2 Apr 2006 13:59:55 +0200
Subject: Documentation: fix minor kernel-doc warnings

This patch updates the comments to match the actual code.

Signed-off-by: Martin Waitz
Signed-off-by: Adrian Bunk
---
 mm/page-writeback.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 893d7677579..6dcce3a4bbd 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -258,7 +258,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
- * @nr_pages: number of pages which the caller has just dirtied
+ * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
--
cgit v1.2.3
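The rule behind this warning: in a kernel-doc block, every @name line
must match a parameter of the documented function.  A minimal sketch
with a hypothetical function, frob_pages() (not from the patch),
showing the corrected form:

	/**
	 * frob_pages - example kernel-doc block (hypothetical function)
	 * @mapping: address_space which was dirtied
	 * @nr_pages_dirtied: must match the parameter name exactly,
	 *	or scripts/kernel-doc emits a warning
	 */
	void frob_pages(struct address_space *mapping,
			unsigned long nr_pages_dirtied);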