author     Chris Mason <chris.mason@oracle.com>  2008-07-21 10:29:44 -0400
committer  Chris Mason <chris.mason@oracle.com>  2008-09-25 11:04:05 -0400
commit     4a09675279674041862d2210635b0cc1f60be28e (patch)
tree       19e4736c062f87729dcdc1bd57f4919b3227ec32 /fs/btrfs/inode.c
parent     e5a2217ef6ff088d08a27208929a6f9c635d672c (diff)
Btrfs: Data ordered fixes

* In btrfs_delete_inode, wait for ordered extents after calling
  truncate_inode_pages. This is much faster, and more correct.
* Properly clear the PageChecked bit everywhere we redirty the page.
* Change the writepage fixup handler to lock the page range and check
  whether an ordered extent was inserted since the improperly dirtied
  page was discovered (see the sketch below).
* Wait for ordered extents outside the transaction. This isn't required
  by the locking rules, but it does improve transaction latency.
* Reduce contention on the alloc_mutex by dropping it while incrementing
  refs on a node/leaf and while dropping refs on a leaf.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
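The fixup-handler change follows a common kernel pattern: take the locks, look
for a conflicting in-flight operation, and if one is found drop every lock
before waiting on it, then retry from the top. Below is a minimal userspace
sketch of that pattern using pthreads; all names (struct range, fixup_range,
ordered_io_complete) are illustrative stand-ins, not btrfs or kernel APIs.

/*
 * Userspace model of the retry loop in btrfs_writepage_fixup_worker:
 * if an in-flight (ordered) operation covers the range, drop all
 * locks, wait for it to finish, and re-check everything from scratch.
 */
#include <pthread.h>
#include <stdbool.h>

struct range {
	pthread_mutex_t page_lock;   /* models lock_page() */
	pthread_mutex_t extent_lock; /* models lock_extent() */
	pthread_cond_t  io_done;     /* signalled when ordered I/O finishes */
	bool ordered;                /* an ordered extent covers this range */
	bool delalloc;               /* models the EXTENT_DELALLOC bit */
};

/* Completion side: clears the flag and wakes every waiter. */
static void ordered_io_complete(struct range *r)
{
	pthread_mutex_lock(&r->extent_lock);
	r->ordered = false;
	pthread_cond_broadcast(&r->io_done);
	pthread_mutex_unlock(&r->extent_lock);
}

static void fixup_range(struct range *r)
{
again:
	pthread_mutex_lock(&r->page_lock);
	pthread_mutex_lock(&r->extent_lock);

	if (r->ordered) {
		/*
		 * We must not wait while holding the locks: the thread
		 * completing the ordered I/O may need them.  Drop both,
		 * sleep until the I/O finishes, then retry from the top,
		 * since the world may have changed while we slept.
		 */
		pthread_mutex_unlock(&r->extent_lock);
		pthread_mutex_unlock(&r->page_lock);

		pthread_mutex_lock(&r->extent_lock);
		while (r->ordered)
			pthread_cond_wait(&r->io_done, &r->extent_lock);
		pthread_mutex_unlock(&r->extent_lock);
		goto again;
	}

	r->delalloc = true;          /* models set_extent_delalloc() */
	pthread_mutex_unlock(&r->extent_lock);
	pthread_mutex_unlock(&r->page_lock);
}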
Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r--  fs/btrfs/inode.c | 23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 50ee4befac8..8fb6dc25e7a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -418,7 +418,7 @@ void btrfs_writepage_fixup_worker(struct btrfs_work *work)
fixup = container_of(work, struct btrfs_writepage_fixup, work);
page = fixup->page;
-
+again:
lock_page(page);
if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
ClearPageChecked(page);
@@ -430,9 +430,21 @@ void btrfs_writepage_fixup_worker(struct btrfs_work *work)
page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
- ordered = btrfs_lookup_ordered_extent(inode, page_start);
- if (ordered)
+
+ /* already ordered? We're done */
+ if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
+ EXTENT_ORDERED, 0)) {
goto out;
+ }
+
+ ordered = btrfs_lookup_ordered_extent(inode, page_start);
+ if (ordered) {
+ unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
+ page_end, GFP_NOFS);
+ unlock_page(page);
+ btrfs_start_ordered_extent(inode, ordered, 1);
+ goto again;
+ }
set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start, page_end,
GFP_NOFS);
@@ -1465,11 +1477,11 @@ void btrfs_delete_inode(struct inode *inode)
unsigned long nr;
int ret;
- btrfs_wait_ordered_range(inode, 0, (u64)-1);
truncate_inode_pages(&inode->i_data, 0);
if (is_bad_inode(inode)) {
goto no_delete;
}
+ btrfs_wait_ordered_range(inode, 0, (u64)-1);
btrfs_i_size_write(inode, 0);
trans = btrfs_start_transaction(root, 1);
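The hunk above (and the btrfs_truncate hunk below) moves the wait for ordered
extents so it happens before btrfs_start_transaction, keeping the transaction
from being held open across a potentially long sleep on I/O. A rough userspace
sketch of the resulting ordering; every name here is an illustrative stub, not
a kernel API:

/* Illustrative stubs modeling the ordering in btrfs_delete_inode. */
struct inode_model { int id; };

static void truncate_pages(struct inode_model *inode)      { (void)inode; }
static void wait_for_ordered_io(struct inode_model *inode) { (void)inode; /* may sleep a long time */ }
static void begin_transaction(void) { }
static void drop_inode_items(struct inode_model *inode)    { (void)inode; }
static void end_transaction(void) { }

static void delete_inode_model(struct inode_model *inode)
{
	truncate_pages(inode);       /* fast: just drops cached pages */
	wait_for_ordered_io(inode);  /* slow wait happens before the transaction */

	begin_transaction();         /* transaction only spans the metadata update */
	drop_inode_items(inode);
	end_transaction();
}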
@@ -2707,6 +2719,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
1, 1, GFP_NOFS);
__btrfs_releasepage(page, GFP_NOFS);
+ ClearPageChecked(page);
if (PagePrivate(page)) {
invalidate_extent_lru(tree, page_offset(page),
PAGE_CACHE_SIZE);
@@ -2818,10 +2831,10 @@ static void btrfs_truncate(struct inode *inode)
return;
btrfs_truncate_page(inode->i_mapping, inode->i_size);
+ btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
trans = btrfs_start_transaction(root, 1);
btrfs_set_trans_block_group(trans, inode);
- btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
btrfs_i_size_write(inode, inode->i_size);
/* FIXME, add redo link to tree so we don't leak on crash */