From cd9640a70d542ca026a812ac34733799da0a39c9 Mon Sep 17 00:00:00 2001
From: Alex Elder
Date: Tue, 16 Mar 2010 18:55:54 +0000
Subject: xfs: remove old vmap cache

Re-apply a commit that had been reverted due to regressions that have
since been fixed.

Original commit: d2859751cd0bf586941ffa7308635a293f943c17
Author: Nick Piggin
Date: Tue, 6 Jan 2009 14:40:44 +1100

XFS's vmap batching simply defers a number (up to 64) of vunmaps, and
keeps track of them in a list. To purge the batch, it just goes
through the list and calls vunmap on each one. This is pretty poor: a
global TLB flush is generally still performed on each vunmap, with
the most expensive parts of the operation being the broadcast IPIs
and locking involved in the SMP callouts, and the locking involved in
the vmap management -- none of these are avoided by just batching up
the calls. I'm actually surprised it ever made much difference.
(Now that the lazy vmap allocator is upstream, this description is
not quite right, but the vunmap batching still doesn't seem to do
much).

Rip all this logic out of XFS completely. I will improve vmap
performance and scalability directly in a subsequent patch.

Signed-off-by: Nick Piggin
Reviewed-by: Christoph Hellwig
Signed-off-by: Lachlan McIlroy

The only change I made was to use the "new" xfs_buf_is_vmapped()
function in a place it had been open-coded in the original.

Modified-by: Alex Elder
Signed-off-by: Christoph Hellwig
Signed-off-by: Alex Elder
---
 fs/xfs/linux-2.6/xfs_buf.c | 76 +---------------------------------------------
 1 file changed, 1 insertion(+), 75 deletions(-)

diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 6f76ba85f19..81f4ef27de3 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -167,75 +167,6 @@ test_page_region(
 	return (mask && (page_private(page) & mask) == mask);
 }
 
-/*
- * Mapping of multi-page buffers into contiguous virtual space
- */
-
-typedef struct a_list {
-	void		*vm_addr;
-	struct a_list	*next;
-} a_list_t;
-
-static a_list_t		*as_free_head;
-static int		as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- * Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
-	void		*addr)
-{
-	a_list_t	*aentry;
-
-#ifdef CONFIG_XEN
-	/*
-	 * Xen needs to be able to make sure it can get an exclusive
-	 * RO mapping of pages it wants to turn into a pagetable. If
-	 * a newly allocated page is also still being vmap()ed by xfs,
-	 * it will cause pagetable construction to fail. This is a
-	 * quick workaround to always eagerly unmap pages so that Xen
-	 * is happy.
-	 */
-	vunmap(addr);
-	return;
-#endif
-
-	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
-	if (likely(aentry)) {
-		spin_lock(&as_lock);
-		aentry->next = as_free_head;
-		aentry->vm_addr = addr;
-		as_free_head = aentry;
-		as_list_len++;
-		spin_unlock(&as_lock);
-	} else {
-		vunmap(addr);
-	}
-}
-
-STATIC void
-purge_addresses(void)
-{
-	a_list_t	*aentry, *old;
-
-	if (as_free_head == NULL)
-		return;
-
-	spin_lock(&as_lock);
-	aentry = as_free_head;
-	as_free_head = NULL;
-	as_list_len = 0;
-	spin_unlock(&as_lock);
-
-	while ((old = aentry) != NULL) {
-		vunmap(aentry->vm_addr);
-		aentry = aentry->next;
-		kfree(old);
-	}
-}
-
 /*
  * Internal xfs_buf_t object manipulation
  */
@@ -337,7 +268,7 @@ xfs_buf_free(
 	uint		i;
 
 	if (xfs_buf_is_vmapped(bp))
-		free_address(bp->b_addr - bp->b_offset);
+		vunmap(bp->b_addr - bp->b_offset);
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page = bp->b_pages[i];
@@ -457,8 +388,6 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		if (as_list_len > 64)
-			purge_addresses();
 		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
 					VM_MAP, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
@@ -1955,9 +1884,6 @@ xfsbufd(
 			xfs_buf_iostrategy(bp);
 			count++;
 		}
-
-		if (as_list_len > 0)
-			purge_addresses();
 
 		if (count)
 			blk_run_address_space(target->bt_mapping);
--
cgit v1.2.3
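
To see why batching the calls saved so little, it helps to strip the
removed scheme down to its shape. The following is a small userspace
model, not kernel code: expensive_unmap(), defer_free(), and
purge_deferred() are hypothetical stand-ins for vunmap(),
free_address(), and purge_addresses(). Deferral changes when the
expensive work happens, not how much of it happens; the purge still
pays the full per-unmap price for every entry.

#include <stdio.h>
#include <stdlib.h>

struct deferred {
	void		*addr;
	struct deferred	*next;
};

static struct deferred *free_head;

/* Stand-in for vunmap(): each call still does the global work. */
static void expensive_unmap(void *addr)
{
	printf("unmap %p (TLB flush + broadcast IPIs happen here)\n", addr);
	free(addr);
}

/* Stand-in for free_address(): push the address on a free list. */
static void defer_free(void *addr)
{
	struct deferred *entry = malloc(sizeof(*entry));

	if (!entry) {
		expensive_unmap(addr);	/* no memory: unmap eagerly */
		return;
	}
	entry->addr = addr;
	entry->next = free_head;
	free_head = entry;
}

/* Stand-in for purge_addresses(): N entries means N expensive calls. */
static void purge_deferred(void)
{
	while (free_head) {
		struct deferred *old = free_head;

		free_head = old->next;
		expensive_unmap(old->addr);
		free(old);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		defer_free(malloc(64));
	purge_deferred();	/* four deferred frees, four full flushes */
	return 0;
}
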
From 8a262e573d30187b32b5534ec489446931239cc5 Mon Sep 17 00:00:00 2001
From: Alex Elder
Date: Tue, 16 Mar 2010 18:55:56 +0000
Subject: xfs: use scalable vmap API

Re-apply a commit that had been reverted due to regressions that have
since been fixed.

From 95f8e302c04c0b0c6de35ab399a5551605eeb006 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Tue, 6 Jan 2009 14:43:09 +1100

Implement XFS's large buffer support with the new vmap APIs. See the
vmap rewrite (db64fe02) for some numbers. The biggest improvement
that comes from using the new APIs is avoiding the global KVA
allocation lock on every call.

Signed-off-by: Nick Piggin
Reviewed-by: Christoph Hellwig
Signed-off-by: Lachlan McIlroy

The only modifications here were a minor reformat, plus making the
patch apply given the new use of xfs_buf_is_vmapped().

Modified-by: Alex Elder
Signed-off-by: Christoph Hellwig
Signed-off-by: Alex Elder
---
 fs/xfs/linux-2.6/xfs_buf.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 81f4ef27de3..bd111b7e1da 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -268,7 +268,8 @@ xfs_buf_free(
 	uint		i;
 
 	if (xfs_buf_is_vmapped(bp))
-		vunmap(bp->b_addr - bp->b_offset);
+		vm_unmap_ram(bp->b_addr - bp->b_offset,
+				bp->b_page_count);
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page = bp->b_pages[i];
@@ -388,8 +389,8 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
-					VM_MAP, PAGE_KERNEL);
+		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+					-1, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
--
cgit v1.2.3
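
For reference, the replacement API pairs a mapping call with an unmap
call that must be given the same page count. A minimal sketch against
the signatures of this era (vm_map_ram() still took a pgprot_t
argument at the time; the helper names are made up for illustration):

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map a buffer's pages into one contiguous kernel virtual range. */
static void *map_buffer_pages(struct page **pages, unsigned int count)
{
	/*
	 * node == -1 means no NUMA preference.  For small requests
	 * vm_map_ram() carves KVA out of per-CPU vmap blocks, so
	 * concurrent callers avoid the global KVA allocation lock
	 * that every vmap() call takes.
	 */
	return vm_map_ram(pages, count, -1, PAGE_KERNEL);
}

/* Tear the mapping down; count must match what was mapped. */
static void unmap_buffer_pages(void *addr, unsigned int count)
{
	vm_unmap_ram(addr, count);
}

Note the asymmetry this forces on callers: vunmap() can recover the
mapping size on its own, but vm_unmap_ram() cannot, which is why
xfs_buf_free() above now passes bp->b_page_count.
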
From e8c3753ce4cd6a805ebcfdb3aa6d30e6f4b8b3e0 Mon Sep 17 00:00:00 2001
From: Dave Chinner
Date: Mon, 15 Mar 2010 02:36:35 +0000
Subject: xfs: don't warn about page discards on shutdown

If we are doing a forced shutdown, we can get lots of noise about
delalloc pages being discarded. This happens by design during a
forced shutdown, so don't spam the logs with these messages.

Signed-off-by: Dave Chinner
Reviewed-by: Christoph Hellwig
Signed-off-by: Alex Elder
---
 fs/xfs/linux-2.6/xfs_aops.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 9083357f9e4..99628508cb1 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -932,6 +932,9 @@ xfs_aops_discard_page(
 	if (!xfs_is_delayed_page(page, IOMAP_DELAY))
 		goto out_invalidate;
 
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		goto out_invalidate;
+
 	xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
 		"page discard on page %p, inode 0x%llx, offset %llu.",
 			page, ip->i_ino, offset);
@@ -964,8 +967,10 @@ xfs_aops_discard_page(
 
 		if (error) {
 			/* something screwed, just bail */
-			xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
-			"page discard failed delalloc mapping lookup.");
+			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+				"page discard failed delalloc mapping lookup.");
+			}
 			break;
 		}
 		if (!nimaps) {
@@ -991,8 +996,10 @@ xfs_aops_discard_page(
 		ASSERT(!flist.xbf_count && !flist.xbf_first);
 		if (error) {
 			/* something screwed, just bail */
-			xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
 				"page discard unable to remove delalloc mapping.");
+			}
 			break;
 		}
 next_buffer:
--
cgit v1.2.3
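
The fix distills to one guard pattern: check whether the filesystem
is already shut down before emitting a per-page diagnostic, since the
shutdown itself has already been reported. A sketch of that pattern
using the XFS interfaces of this era (the wrapper function below is
hypothetical, not code from the patch):

/* Log a page discard unless a forced shutdown makes it expected. */
static void
report_page_discard(
	struct xfs_inode	*ip,
	struct page		*page,
	loff_t			offset)
{
	/* Discards happen by design during forced shutdown: stay quiet. */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return;

	xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
		page, ip->i_ino, offset);
}

The same test wraps the two failure messages inside the discard loop,
so a shutdown hit partway through a discard is equally silent.
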