From b574480507460b8e31b8d38dd4642219fc3b9a10 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Sat, 20 Jun 2009 23:34:44 -0400 Subject: jbd2: Remove GFP_ATOMIC kmalloc from inside spinlock critical region Fix jbd2_dev_to_name(), a function used when pretty-printing jbd2 and ext4 tracepoints. Signed-off-by: "Theodore Ts'o" --- fs/jbd2/journal.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 18bfd5dab64..7b545c3b394 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -2410,6 +2410,7 @@ const char *jbd2_dev_to_name(dev_t device) int i = hash_32(device, CACHE_SIZE_BITS); char *ret; struct block_device *bd; + static struct devname_cache *new_dev; rcu_read_lock(); if (devcache[i] && devcache[i]->device == device) { @@ -2419,20 +2420,20 @@ const char *jbd2_dev_to_name(dev_t device) } rcu_read_unlock(); + new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL); + if (!new_dev) + return "NODEV-ALLOCFAILURE"; /* Something non-NULL */ spin_lock(&devname_cache_lock); if (devcache[i]) { if (devcache[i]->device == device) { + kfree(new_dev); ret = devcache[i]->devname; spin_unlock(&devname_cache_lock); return ret; } call_rcu(&devcache[i]->rcu, free_devcache); } - devcache[i] = kmalloc(sizeof(struct devname_cache), GFP_KERNEL); - if (!devcache[i]) { - spin_unlock(&devname_cache_lock); - return "NODEV-ALLOCFAILURE"; /* Something non-NULL */ - } + devcache[i] = new_dev; devcache[i]->device = device; bd = bdget(device); if (bd) { -- cgit v1.2.3 From f4a01017d678fe4baecf480e79d7c4f4b7ebc772 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Sun, 5 Jul 2009 22:08:16 -0400 Subject: ext4: Fix potential reclaim deadlock when truncating partial block The ext4_block_truncate_page() function previously called grab_cache_page(), which called find_or_create_page() with the __GFP_FS flag potentially set. This could cause a deadlock if the system is low on memory and it attempts a memory reclaim, which could potentially call back into ext4. So we need to call find_or_create_page() directly, and remove the __GFP_FS flag to avoid this potential deadlock. Thanks to Roland Dreier for reporting a lockdep warning which showed this problem. [20786.363249] ================================= [20786.363257] [ INFO: inconsistent lock state ] [20786.363265] 2.6.31-2-generic #14~rbd4gitd960eea9 [20786.363270] --------------------------------- [20786.363276] inconsistent {IN-RECLAIM_FS-W} -> {RECLAIM_FS-ON-W} usage. 
[20786.363285] http/8397 [HC0[0]:SC0[0]:HE1:SE1] takes: [20786.363291] (jbd2_handle){+.+.?.}, at: [] jbd2_journal_start+0xdb/0x150 [20786.363314] {IN-RECLAIM_FS-W} state was registered at: [20786.363320] [] mark_irqflags+0xc6/0x1a0 [20786.363334] [] __lock_acquire+0x287/0x430 [20786.363345] [] lock_acquire+0xa5/0x150 [20786.363355] [] jbd2_journal_start+0xfa/0x150 [20786.363365] [] ext4_journal_start_sb+0x58/0x90 [20786.363377] [] ext4_delete_inode+0xc5/0x2c0 [20786.363389] [] generic_delete_inode+0xd3/0x1a0 [20786.363401] [] generic_drop_inode+0x25/0x30 [20786.363411] [] iput+0x62/0x70 [20786.363420] [] dentry_iput+0x98/0x110 [20786.363429] [] d_kill+0x50/0x80 [20786.363438] [] dput+0x95/0x180 [20786.363447] [] ecryptfs_d_release+0x2b/0x70 [20786.363459] [] d_free+0x28/0x60 [20786.363468] [] d_kill+0x68/0x80 [20786.363477] [] prune_one_dentry+0xa3/0xc0 [20786.363487] [] __shrink_dcache_sb+0x271/0x290 [20786.363497] [] prune_dcache+0x109/0x1b0 [20786.363506] [] shrink_dcache_memory+0x3f/0x50 [20786.363516] [] shrink_slab+0x12d/0x190 [20786.363527] [] balance_pgdat+0x4d7/0x640 [20786.363537] [] kswapd+0x117/0x170 [20786.363546] [] kthread+0x9e/0xb0 [20786.363558] [] child_rip+0xa/0x20 [20786.363569] [] 0xffffffffffffffff [20786.363598] irq event stamp: 15997 [20786.363603] hardirqs last enabled at (15997): [] kmem_cache_alloc+0xfd/0x1a0 [20786.363617] hardirqs last disabled at (15996): [] kmem_cache_alloc+0x61/0x1a0 [20786.363628] softirqs last enabled at (15966): [] __do_softirq+0x14a/0x220 [20786.363641] softirqs last disabled at (15861): [] call_softirq+0x1c/0x30 [20786.363651] [20786.363653] other info that might help us debug this: [20786.363660] 3 locks held by http/8397: [20786.363665] #0: (&sb->s_type->i_mutex_key#8){+.+.+.}, at: [] do_truncate+0x64/0x90 [20786.363685] #1: (&sb->s_type->i_alloc_sem_key#5){+++++.}, at: [] notify_change+0x250/0x350 [20786.363707] #2: (jbd2_handle){+.+.?.}, at: [] jbd2_journal_start+0xdb/0x150 [20786.363724] [20786.363726] stack backtrace: [20786.363734] Pid: 8397, comm: http Tainted: G C 2.6.31-2-generic #14~rbd4gitd960eea9 [20786.363741] Call Trace: [20786.363752] [] print_usage_bug+0x18c/0x1a0 [20786.363763] [] ? check_usage_backwards+0x0/0xb0 [20786.363773] [] mark_lock_irq+0xf2/0x280 [20786.363783] [] mark_lock+0x137/0x1d0 [20786.363793] [] mark_held_locks+0x6c/0xa0 [20786.363803] [] lockdep_trace_alloc+0xaf/0xe0 [20786.363813] [] __alloc_pages_nodemask+0x7c/0x180 [20786.363824] [] ? find_get_page+0x91/0xf0 [20786.363835] [] alloc_pages_current+0x87/0xd0 [20786.363845] [] __page_cache_alloc+0x67/0x70 [20786.363856] [] find_or_create_page+0x4f/0xb0 [20786.363867] [] ext4_block_truncate_page+0x3e/0x460 [20786.363876] [] ? jbd2_journal_start+0xfa/0x150 [20786.363885] [] ? jbd2_journal_start+0xdb/0x150 [20786.363895] [] ? ext4_meta_trans_blocks+0x75/0xf0 [20786.363905] [] ext4_ext_truncate+0x1bb/0x1e0 [20786.363916] [] ? unmap_mapping_range+0x75/0x290 [20786.363926] [] ext4_truncate+0x498/0x630 [20786.363938] [] ? _raw_spin_unlock+0x5e/0xb0 [20786.363947] [] ? unmap_mapping_range+0xb6/0x290 [20786.363957] [] ? trace_hardirqs_on+0xd/0x10 [20786.363966] [] ? 
jbd2_journal_stop+0x1f8/0x2e0 [20786.363976] [] vmtruncate+0xb0/0x110 [20786.363986] [] inode_setattr+0x35/0x170 [20786.363995] [] ext4_setattr+0x186/0x370 [20786.364005] [] notify_change+0x16b/0x350 [20786.364014] [] do_truncate+0x70/0x90 [20786.364021] [] T.657+0xeb/0x110 [20786.364021] [] sys_ftruncate+0xe/0x10 [20786.364021] [] system_call_fastpath+0x16/0x1b Reported-by: Roland Dreier Signed-off-by: "Theodore Ts'o" --- fs/ext4/inode.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 60a26f3a6f8..9760ba09275 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3583,7 +3583,8 @@ int ext4_block_truncate_page(handle_t *handle, struct page *page; int err = 0; - page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT); + page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, + mapping_gfp_mask(mapping) & ~__GFP_FS); if (!page) return -EINVAL; -- cgit v1.2.3 From 089ceecc1ea4a69ed8bcc5c7c7b96ce487e26b33 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Sun, 5 Jul 2009 22:17:31 -0400 Subject: ext4: mark several more functions in mballoc.c as noinline Ted noticed a stack-deep callchain through writepages->ext4_mb_regular_allocator->ext4_mb_init_cache->submit_bh ... With all the static functions in mballoc.c, gcc helpfully inlines for us, and we get something like this: ext4_mb_regular_allocator (232 bytes stack) ext4_mb_init_cache (232 bytes stack) submit_bh (starts 464 deeper) the 2 ext4 functions here get several others inlined; by telling gcc not to inline them, we can save stack space for when we head off into submit_bh land and associated block layer callchains. The following noinlined functions are only called once, so this won't impact any other callchains: ext4_mb_regular_allocator (104) (was 232) ext4_mb_find_by_goal (56) (noinlined) ext4_mb_init_group (24) (noinlined) ext4_mb_init_cache (136) (was 232) ext4_mb_generate_buddy (88) (noinlined) ext4_mb_generate_from_pa (40) (noinlined) submit_bh ext4_mb_simple_scan_group (24) (noinlined) ext4_mb_scan_aligned (56) (noinlined) ext4_mb_complex_scan_group (40) (noinlined) ext4_mb_try_best_found (24) (noinlined) now when we head off into submit_bh() we're only 264 bytes deeper in stack than when we entered ext4_mb_regular_allocator() (vs. 464 bytes before). Every 200 bytes helps. 
:) Signed-off-by: Eric Sandeen Signed-off-by: "Theodore Ts'o" --- fs/ext4/mballoc.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 519a0a686d9..4a45efabb20 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -657,7 +657,8 @@ static void ext4_mb_mark_free_simple(struct super_block *sb, } } -static void ext4_mb_generate_buddy(struct super_block *sb, +static noinline_for_stack +void ext4_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap, ext4_group_t group) { struct ext4_group_info *grp = ext4_get_group_info(sb, group); @@ -1480,7 +1481,8 @@ static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, ext4_mb_check_limits(ac, e4b, 0); } -static int ext4_mb_try_best_found(struct ext4_allocation_context *ac, +static noinline_for_stack +int ext4_mb_try_best_found(struct ext4_allocation_context *ac, struct ext4_buddy *e4b) { struct ext4_free_extent ex = ac->ac_b_ex; @@ -1507,7 +1509,8 @@ static int ext4_mb_try_best_found(struct ext4_allocation_context *ac, return 0; } -static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, +static noinline_for_stack +int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, struct ext4_buddy *e4b) { ext4_group_t group = ac->ac_g_ex.fe_group; @@ -1566,7 +1569,8 @@ static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, * The routine scans buddy structures (not bitmap!) from given order * to max order and tries to find big enough chunk to satisfy the req */ -static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, +static noinline_for_stack +void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, struct ext4_buddy *e4b) { struct super_block *sb = ac->ac_sb; @@ -1609,7 +1613,8 @@ static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, * In order to optimize scanning, caller must pass number of * free blocks in the group, so the routine can know upper limit. */ -static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, +static noinline_for_stack +void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, struct ext4_buddy *e4b) { struct super_block *sb = ac->ac_sb; @@ -1668,7 +1673,8 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, * we try to find stripe-aligned chunks for stripe-size requests * XXX should do so at least for multiples of stripe size as well */ -static void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, +static noinline_for_stack +void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, struct ext4_buddy *e4b) { struct super_block *sb = ac->ac_sb; @@ -1831,7 +1837,8 @@ void ext4_mb_put_buddy_cache_lock(struct super_block *sb, } -static int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) +static noinline_for_stack +int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) { int ret; @@ -3457,7 +3464,8 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, * used in in-core bitmap. 
buddy must be generated from this bitmap * Need to be called with ext4 group lock held */ -static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, +static noinline_for_stack +void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, ext4_group_t group) { struct ext4_group_info *grp = ext4_get_group_info(sb, group); -- cgit v1.2.3 From 726447d803802cd0be8f62d17c4a34421781b938 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Mon, 13 Jul 2009 10:24:17 -0400 Subject: ext4: naturally align struct ext4_allocation_request As Ted noted, the ext4_allocation_request isn't well aligned. Looking at it with pahole we're wasting space on 64-bit arches:

	struct ext4_allocation_request {
		struct inode *inode;     /*  0     8 */
		ext4_lblk_t   logical;   /*  8     4 */
		/* XXX 4 bytes hole, try to pack */
		ext4_fsblk_t  goal;      /* 16     8 */
		ext4_lblk_t   lleft;     /* 24     4 */
		/* XXX 4 bytes hole, try to pack */
		ext4_fsblk_t  pleft;     /* 32     8 */
		ext4_lblk_t   lright;    /* 40     4 */
		/* XXX 4 bytes hole, try to pack */
		ext4_fsblk_t  pright;    /* 48     8 */
		unsigned int  len;       /* 56     4 */
		unsigned int  flags;     /* 60     4 */
		/* --- cacheline 1 boundary (64 bytes) --- */

		/* size: 64, cachelines: 1, members: 9 */
		/* sum members: 52, holes: 3, sum holes: 12 */
	};

Grouping 32-bit members together closes these holes and shrinks the structure by 12 bytes, which is important since ext4 can get on the hairy edge of stack overruns. Signed-off-by: Eric Sandeen Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 0ddf7e55abe..9714db393ef 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -93,20 +93,20 @@ typedef unsigned int ext4_group_t; struct ext4_allocation_request { /* target inode for block we're allocating */ struct inode *inode; + /* how many blocks we want to allocate */ + unsigned int len; /* logical block in target inode */ ext4_lblk_t logical; - /* phys. target (a hint) */ - ext4_fsblk_t goal; /* the closest logical allocated block to the left */ ext4_lblk_t lleft; - /* phys. block for ^^^ */ - ext4_fsblk_t pleft; /* the closest logical allocated block to the right */ ext4_lblk_t lright; - /* phys. block for ^^^ */ + /* phys. target (a hint) */ + ext4_fsblk_t goal; + /* phys. block for the closest logical allocated block to the left */ + ext4_fsblk_t pleft; + /* phys. block for the closest logical allocated block to the right */ ext4_fsblk_t pright; - /* how many blocks we want to allocate */ - unsigned int len; /* flags. see above EXT4_MB_HINT_* */ unsigned int flags; }; -- cgit v1.2.3 From 3e03f9ca6a2599db1823bb0ea24e0845219a0e69 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Sun, 5 Jul 2009 22:29:27 -0400 Subject: ext4: Use rcu_barrier() on module unload. The ext4 module uses call_rcu(), and thus it should use rcu_barrier() on module unload. The kmem cache ext4_pspace_cachep is sometimes freed using call_rcu() callbacks. Thus, we must wait for completion of call_rcu() before doing kmem_cache_destroy(). 
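The requirement generalizes beyond ext4: any module that frees slab objects via call_rcu() must drain those callbacks before destroying the cache they free into. A minimal sketch of the pattern, with hypothetical example_* names rather than the ext4 code itself:

	#include <linux/module.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct example_obj {
		struct rcu_head rcu;
		int data;
	};

	static struct kmem_cache *example_cachep;

	static void example_free_rcu(struct rcu_head *head)
	{
		kmem_cache_free(example_cachep,
				container_of(head, struct example_obj, rcu));
	}

	static void example_release(struct example_obj *obj)
	{
		/* readers may still hold RCU references; defer the free */
		call_rcu(&obj->rcu, example_free_rcu);
	}

	static void __exit example_exit(void)
	{
		/*
		 * rcu_barrier() waits for all pending call_rcu() callbacks
		 * to finish running; only then is it safe to destroy the
		 * cache they free into.  Note that synchronize_rcu() is not
		 * enough: it waits for a grace period to elapse, not for
		 * the callbacks themselves to complete.
		 */
		rcu_barrier();
		kmem_cache_destroy(example_cachep);
	}
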
Signed-off-by: Jesper Dangaard Brouer Signed-off-by: "Theodore Ts'o" --- fs/ext4/mballoc.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 4a45efabb20..2fcaf286f1d 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2909,7 +2909,11 @@ int __init init_ext4_mballoc(void) void exit_ext4_mballoc(void) { - /* XXX: synchronize_rcu(); */ + /* + * Wait for completion of call_rcu()'s on ext4_pspace_cachep + * before destroying the slab cache. + */ + rcu_barrier(); kmem_cache_destroy(ext4_pspace_cachep); kmem_cache_destroy(ext4_ac_cachep); kmem_cache_destroy(ext4_free_ext_cachep); -- cgit v1.2.3 From f91d1d04171026e56c7e343ee3cdcc801dd85cfb Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 13 Jul 2009 16:16:20 -0400 Subject: jbd2: Fix a race between checkpointing code and journal_get_write_access() The following race can happen:

	CPU1                                  CPU2
	                                      checkpointing code checks the
	                                      buffer, adds it to an array
	                                      for writeback
	do_get_write_access()
	  ...
	  lock_buffer()
	  unlock_buffer()
	                                      flush_batch() submits the
	                                      buffer for IO
	  __jbd2_journal_file_buffer()

So a buffer under writeout is returned from do_get_write_access(). Since the filesystem code relies on the fact that journaled buffers cannot be written out, it does not take the buffer lock, and so it can modify the buffer while it is under writeout. That can lead to filesystem corruption if we crash at the right moment. We fix the problem by clearing the buffer dirty bit under the buffer lock even if the buffer is on the BJ_None list. Actually, we clear the dirty bit regardless of the list the buffer is on, and warn if the buffer is already journalled. Credit for spotting the problem goes to dingdinghua. Reported-by: dingdinghua Signed-off-by: Jan Kara Signed-off-by: "Theodore Ts'o" --- fs/jbd2/transaction.c | 68 ++++++++++++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 494501edba6..6213ac728f3 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -499,34 +499,15 @@ void jbd2_journal_unlock_updates (journal_t *journal) wake_up(&journal->j_wait_transaction_locked); } -/* - * Report any unexpected dirty buffers which turn up. Normally those - * indicate an error, but they can occur if the user is running (say) - * tune2fs to modify the live filesystem, so we need the option of - * continuing as gracefully as possible. # - * - * The caller should already hold the journal lock and - * j_list_lock spinlock: most callers will need those anyway - * in order to probe the buffer's journaling state safely. - */ -static void jbd_unexpected_dirty_buffer(struct journal_head *jh) +static void warn_dirty_buffer(struct buffer_head *bh) { - int jlist; - - /* If this buffer is one which might reasonably be dirty - * --- ie. data, or not part of this journal --- then - * we're OK to leave it alone, but otherwise we need to - * move the dirty bit to the journal's own internal - * JBDDirty bit. */ - jlist = jh->b_jlist; + char b[BDEVNAME_SIZE]; - if (jlist == BJ_Metadata || jlist == BJ_Reserved || - jlist == BJ_Shadow || jlist == BJ_Forget) { - struct buffer_head *bh = jh2bh(jh); - - if (test_clear_buffer_dirty(bh)) - set_buffer_jbddirty(bh); - } + printk(KERN_WARNING + "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). 
" + "There's a risk of filesystem corruption in case of system " + "crash.\n", + bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr); } /* @@ -593,14 +574,16 @@ repeat: if (jh->b_next_transaction) J_ASSERT_JH(jh, jh->b_next_transaction == transaction); + warn_dirty_buffer(bh); } /* * In any case we need to clean the dirty flag and we must * do it under the buffer lock to be sure we don't race * with running write-out. */ - JBUFFER_TRACE(jh, "Unexpected dirty buffer"); - jbd_unexpected_dirty_buffer(jh); + JBUFFER_TRACE(jh, "Journalling dirty buffer"); + clear_buffer_dirty(bh); + set_buffer_jbddirty(bh); } unlock_buffer(bh); @@ -843,6 +826,15 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh) J_ASSERT_JH(jh, buffer_locked(jh2bh(jh))); if (jh->b_transaction == NULL) { + /* + * Previous jbd2_journal_forget() could have left the buffer + * with jbddirty bit set because it was being committed. When + * the commit finished, we've filed the buffer for + * checkpointing and marked it dirty. Now we are reallocating + * the buffer so the transaction freeing it must have + * committed and so it's safe to clear the dirty bit. + */ + clear_buffer_dirty(jh2bh(jh)); jh->b_transaction = transaction; /* first access by this transaction */ @@ -1644,8 +1636,13 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) if (jh->b_cp_transaction) { JBUFFER_TRACE(jh, "on running+cp transaction"); + /* + * We don't want to write the buffer anymore, clear the + * bit so that we don't confuse checks in + * __journal_file_buffer + */ + clear_buffer_dirty(bh); __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); - clear_buffer_jbddirty(bh); may_free = 0; } else { JBUFFER_TRACE(jh, "on running transaction"); @@ -1896,12 +1893,17 @@ void __jbd2_journal_file_buffer(struct journal_head *jh, if (jh->b_transaction && jh->b_jlist == jlist) return; - /* The following list of buffer states needs to be consistent - * with __jbd_unexpected_dirty_buffer()'s handling of dirty - * state. */ - if (jlist == BJ_Metadata || jlist == BJ_Reserved || jlist == BJ_Shadow || jlist == BJ_Forget) { + /* + * For metadata buffers, we track dirty bit in buffer_jbddirty + * instead of buffer_dirty. We should not see a dirty bit set + * here because we clear it in do_get_write_access but e.g. + * tune2fs can modify the sb and set the dirty bit at any time + * so we try to gracefully handle that. + */ + if (buffer_dirty(bh)) + warn_dirty_buffer(bh); if (test_clear_buffer_dirty(bh) || test_clear_buffer_jbddirty(bh)) was_dirty = 1; -- cgit v1.2.3 From ffacfa7a79d6c00624196b2d13b0a7f72f2b8227 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 13 Jul 2009 16:22:22 -0400 Subject: ext4: Fix truncation of symlinks after failed write Contents of long symlinks is written via standard write methods. So when the write fails, we add inode to orphan list. But symlinks don't have .truncate method defined so nobody properly removes them from the on disk orphan list. Fix this by calling ext4_truncate() directly instead of calling vmtruncate() (which is saner anyway since we don't need anything vmtruncate() does except from calling .truncate in these paths). We also add inode to orphan list only if ext4_can_truncate() is true (currently, it can be false for symlinks when there are no blocks allocated) - otherwise orphan list processing will complain and ext4_truncate() will not remove inode from on-disk orphan list. 
Signed-off-by: Jan Kara Signed-off-by: "Theodore Ts'o" --- fs/ext4/inode.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 9760ba09275..ff2afc1909b 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1513,14 +1513,14 @@ retry: * Add inode to orphan list in case we crash before * truncate finishes */ - if (pos + len > inode->i_size) + if (pos + len > inode->i_size && ext4_can_truncate(inode)) ext4_orphan_add(handle, inode); ext4_journal_stop(handle); if (pos + len > inode->i_size) { - vmtruncate(inode, inode->i_size); + ext4_truncate(inode); /* - * If vmtruncate failed early the inode might + * If truncate failed early the inode might * still be on the orphan list; we need to * make sure the inode is removed from the * orphan list in that case. @@ -1614,7 +1614,7 @@ static int ext4_ordered_write_end(struct file *file, ret2 = ext4_generic_write_end(file, mapping, pos, len, copied, page, fsdata); copied = ret2; - if (pos + len > inode->i_size) + if (pos + len > inode->i_size && ext4_can_truncate(inode)) /* if we have allocated more blocks and copied * less. We will have blocks allocated outside * inode->i_size. So truncate them @@ -1628,9 +1628,9 @@ static int ext4_ordered_write_end(struct file *file, ret = ret2; if (pos + len > inode->i_size) { - vmtruncate(inode, inode->i_size); + ext4_truncate(inode); /* - * If vmtruncate failed early the inode might still be + * If truncate failed early the inode might still be * on the orphan list; we need to make sure the inode * is removed from the orphan list in that case. */ @@ -1655,7 +1655,7 @@ static int ext4_writeback_write_end(struct file *file, ret2 = ext4_generic_write_end(file, mapping, pos, len, copied, page, fsdata); copied = ret2; - if (pos + len > inode->i_size) + if (pos + len > inode->i_size && ext4_can_truncate(inode)) /* if we have allocated more blocks and copied * less. We will have blocks allocated outside * inode->i_size. So truncate them @@ -1670,9 +1670,9 @@ static int ext4_writeback_write_end(struct file *file, ret = ret2; if (pos + len > inode->i_size) { - vmtruncate(inode, inode->i_size); + ext4_truncate(inode); /* - * If vmtruncate failed early the inode might still be + * If truncate failed early the inode might still be * on the orphan list; we need to make sure the inode * is removed from the orphan list in that case. */ @@ -1722,7 +1722,7 @@ static int ext4_journalled_write_end(struct file *file, unlock_page(page); page_cache_release(page); - if (pos + len > inode->i_size) + if (pos + len > inode->i_size && ext4_can_truncate(inode)) /* if we have allocated more blocks and copied * less. We will have blocks allocated outside * inode->i_size. So truncate them @@ -1733,9 +1733,9 @@ static int ext4_journalled_write_end(struct file *file, if (!ret) ret = ret2; if (pos + len > inode->i_size) { - vmtruncate(inode, inode->i_size); + ext4_truncate(inode); /* - * If vmtruncate failed early the inode might still be + * If truncate failed early the inode might still be * on the orphan list; we need to make sure the inode * is removed from the orphan list in that case. */ @@ -2907,7 +2907,7 @@ retry: * i_size_read because we hold i_mutex. 
*/ if (pos + len > inode->i_size) - vmtruncate(inode, inode->i_size); + ext4_truncate(inode); } if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) -- cgit v1.2.3 From 5887e98b609e96ce61ee0528cf94a2bfdc809dd7 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Sun, 5 Jul 2009 23:12:04 -0400 Subject: ext4: Calculate required journal credits for inserting an extent properly When we have space in the extent tree leaf node, we should be able to insert the extent with far fewer journal credits. The code was doing the proper calculation but missed a return statement. Reported-by: Andreas Dilger Signed-off-by: Aneesh Kumar K.V Signed-off-by: "Theodore Ts'o" --- fs/ext4/extents.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 50322a09bd0..73ebfb44ad7 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -1977,6 +1977,7 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, */ /* 1 bitmap, 1 block group descriptor */ ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); + return ret; } } -- cgit v1.2.3 From 5adfee9c17314c1411095c23191c3cb0c2d25f9f Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 8 Jul 2009 17:11:24 -0400 Subject: ext4: fix no journal corruption with locale-gen If there is no journal, ext4_should_writeback_data() should return TRUE. This will fix ext4_set_aops() to set ext4_da_aops in the case of delayed allocation; otherwise ext4_journalled_aops gets used by default, which doesn't handle delayed allocation properly. The advantage of using the ext4_should_writeback_data() approach is that it should handle nobh better as well. Thanks to Curt Wohlgemuth for investigating this problem, and Aneesh Kumar for suggesting this approach. Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4_jbd2.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index be2f426f680..d574a85aca5 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h @@ -281,10 +281,10 @@ static inline int ext4_should_order_data(struct inode *inode) static inline int ext4_should_writeback_data(struct inode *inode) { - if (EXT4_JOURNAL(inode) == NULL) - return 0; if (!S_ISREG(inode->i_mode)) return 0; + if (EXT4_JOURNAL(inode) == NULL) + return 1; if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) return 0; if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) -- cgit v1.2.3 From e6462869e4fd88be5141a356ee0c28d8067340cc Mon Sep 17 00:00:00 2001 From: Johann Lombardi Date: Sun, 5 Jul 2009 23:45:11 -0400 Subject: ext4: Fix goal inum check in the inode allocator The goal inode is specified by an inode number, which belongs to the range [1, s_inodes_count]. 
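Since inode numbers are 1-based, the last inode in the filesystem is a valid goal, and the old strict '<' comparison wrongly rejected it. A worked example with illustrative numbers (not taken from the patch):

	/*
	 * Suppose s_inodes_count = 16384 and EXT4_INODES_PER_GROUP(sb) = 8192.
	 * The last inode is a perfectly valid goal:
	 *
	 *	goal  = 16384
	 *	group = (16384 - 1) / 8192 = 1		(the second group)
	 *	ino   = (16384 - 1) % 8192 = 8191	(its last slot)
	 *
	 * With 'goal < s_inodes_count' this goal was skipped and the
	 * allocator fell back to its default heuristics; 'goal <='
	 * accepts it.
	 */
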
Signed-off-by: Johann Lombardi Signed-off-by: "Theodore Ts'o" --- fs/ext4/ialloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 2f645732e3b..29e6dc7299b 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -833,7 +833,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode, if (!goal) goal = sbi->s_inode_goal; - if (goal && goal < le32_to_cpu(sbi->s_es->s_inodes_count)) { + if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) { group = (goal - 1) / EXT4_INODES_PER_GROUP(sb); ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb); ret2 = 0; -- cgit v1.2.3 From b767e78a179e5ab30fdbff1686d074ac270471eb Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 4 Jun 2009 08:06:06 -0400 Subject: ext4: Don't look at buffer_heads outside i_size. Buffer heads outside i_size will be unmapped. So when we are doing "walk_page_buffers", limit ourselves to i_size. Signed-off-by: Aneesh Kumar K.V Reviewed-by: Josef Bacik Acked-by: Jan Kara Signed-off-by: "Theodore Ts'o" --- fs/ext4/inode.c | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index ff2afc1909b..b87b68cd324 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2578,7 +2578,7 @@ static int ext4_da_writepage(struct page *page, * all are mapped and non delay. We don't want to * do block allocation here. */ - ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE, + ret = block_prepare_write(page, 0, len, noalloc_get_block_write); if (!ret) { page_bufs = page_buffers(page); @@ -2600,7 +2600,7 @@ static int ext4_da_writepage(struct page *page, return 0; } /* now mark the buffer_heads as dirty and uptodate */ - block_commit_write(page, 0, PAGE_CACHE_SIZE); + block_commit_write(page, 0, len); } if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) @@ -3246,6 +3246,8 @@ static int ext4_normal_writepage(struct page *page, static int __ext4_journalled_writepage(struct page *page, struct writeback_control *wbc) { + loff_t size; + unsigned int len; struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; struct buffer_head *page_bufs; @@ -3253,14 +3255,17 @@ static int __ext4_journalled_writepage(struct page *page, int ret = 0; int err; - ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE, - noalloc_get_block_write); + size = i_size_read(inode); + if (page->index == size >> PAGE_CACHE_SHIFT) + len = size & ~PAGE_CACHE_MASK; + else + len = PAGE_CACHE_SIZE; + ret = block_prepare_write(page, 0, len, noalloc_get_block_write); if (ret != 0) goto out_unlock; page_bufs = page_buffers(page); - walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL, - bget_one); + walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one); /* As soon as we unlock the page, it can go away, but we have * references to buffers so we are safe */ unlock_page(page); @@ -3271,19 +3276,18 @@ static int __ext4_journalled_writepage(struct page *page, goto out; } - ret = walk_page_buffers(handle, page_bufs, 0, - PAGE_CACHE_SIZE, NULL, do_journal_get_write_access); + ret = walk_page_buffers(handle, page_bufs, 0, len, NULL, + do_journal_get_write_access); - err = walk_page_buffers(handle, page_bufs, 0, - PAGE_CACHE_SIZE, NULL, write_end_fn); + err = walk_page_buffers(handle, page_bufs, 0, len, NULL, + write_end_fn); if (ret == 0) ret = err; err = ext4_journal_stop(handle); if (!ret) ret = err; - walk_page_buffers(handle, page_bufs, 0, - 
PAGE_CACHE_SIZE, NULL, bput_one); + walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; goto out; -- cgit v1.2.3 From c364b22c9580a885e0f8c0d0f9710d67dc448958 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Sun, 14 Jun 2009 17:57:10 -0400 Subject: ext4: Fix mmap/truncate race when blocksize < pagesize && delayed allocation It is possible to see buffer_heads which are not mapped in the writepage callback in the following scenario (where the fs blocksize is 1k and the page size is 4k):

 1) truncate(f, 1024)
 2) mmap(f, 0, 4096)
 3) a[0] = 'a'
 4) truncate(f, 4096)
 5) writepage(...)

Now if we get a writepage callback immediately after (4), and before an attempt to write at any other offset via the mmap address (which implies we are yet to get a page fault and do a get_block), what we would have is a dirty page whose first block is allocated and whose other three buffer_heads are unmapped. In the above case the writepage should go ahead, try to write the first block, and clear the page_dirty flag. Further attempts to write to the page will again create a fault and result in allocating blocks and marking the page dirty. If we don't write to any other offset via the mmap address, we would still have written the first block to the disk, and the rest of the space will be considered a hole. So to address this, we change all of the places where we look for delayed, unmapped, or unwritten buffer heads, and only check for delayed or unwritten buffer heads instead. Signed-off-by: Aneesh Kumar K.V Acked-by: Jan Kara Signed-off-by: "Theodore Ts'o" --- fs/ext4/inode.c | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index b87b68cd324..1275f34589c 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2305,15 +2305,9 @@ flush_it: return; } -static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh) +static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) { - /* - * unmapped buffer is possible for holes. - * delay buffer is possible with delayed allocation. - * We also need to consider unwritten buffer as unmapped. 
- */ - return (!buffer_mapped(bh) || buffer_delay(bh) || - buffer_unwritten(bh)) && buffer_dirty(bh); + return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh); } /* @@ -2400,7 +2394,7 @@ static int __mpage_da_writepage(struct page *page, * Otherwise we won't make progress * with the page in ext4_da_writepage */ - if (ext4_bh_unmapped_or_delay(NULL, bh)) { + if (ext4_bh_delay_or_unwritten(NULL, bh)) { mpage_add_bh_to_extent(mpd, logical, bh->b_size, bh->b_state); @@ -2517,7 +2511,6 @@ static int noalloc_get_block_write(struct inode *inode, sector_t iblock, * so call get_block_wrap with create = 0 */ ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0); - BUG_ON(create && ret == 0); if (ret > 0) { bh_result->b_size = (ret << inode->i_blkbits); ret = 0; @@ -2533,7 +2526,7 @@ * - grab_page_cache when doing write_begin (have journal handle) */ static int ext4_da_writepage(struct page *page, - struct writeback_control *wbc) + struct writeback_control *wbc) { int ret = 0; loff_t size; @@ -2551,7 +2544,7 @@ static int ext4_da_writepage(struct page *page, if (page_has_buffers(page)) { page_bufs = page_buffers(page); if (walk_page_buffers(NULL, page_bufs, 0, len, NULL, - ext4_bh_unmapped_or_delay)) { + ext4_bh_delay_or_unwritten)) { /* * We don't want to do block allocation * So redirty the page and return @@ -2584,7 +2577,7 @@ static int ext4_da_writepage(struct page *page, page_bufs = page_buffers(page); /* check whether all are mapped and non delay */ if (walk_page_buffers(NULL, page_bufs, 0, len, NULL, - ext4_bh_unmapped_or_delay)) { + ext4_bh_delay_or_unwritten)) { redirty_page_for_writepage(wbc, page); unlock_page(page); return 0; @@ -3232,7 +3225,7 @@ * happily proceed with mapping them and writing the page. */ BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, - ext4_bh_unmapped_or_delay)); + ext4_bh_delay_or_unwritten)); } if (!ext4_journal_current_handle()) @@ -3322,7 +3315,7 @@ * happily proceed with mapping them and writing the page. */ BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, - ext4_bh_unmapped_or_delay)); + ext4_bh_delay_or_unwritten)); } if (ext4_journal_current_handle()) -- cgit v1.2.3 From 43ce1d23b43330634507a049b55c36e91d27282e Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Sun, 14 Jun 2009 17:58:45 -0400 Subject: ext4: Fix mmap/truncate race when blocksize < pagesize && !nodelalloc This patch fixes, for the non-delalloc case, the same mmap/truncate race that was previously fixed for delayed allocation, by merging ext4_{journalled,normal,da}_writepage() into ext4_writepage(). Signed-off-by: Aneesh Kumar K.V Acked-by: Jan Kara Signed-off-by: "Theodore Ts'o" --- fs/ext4/inode.c | 234 ++++++++++++++------------------------------------ 1 file changed, 57 insertions(+), 177 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 1275f34589c..97c48b5b057 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -47,6 +47,10 @@ #define MPAGE_DA_EXTENT_TAIL 0x01 +static int __ext4_journalled_writepage(struct page *page, + struct writeback_control *wbc, + unsigned int len); + static inline int ext4_begin_ordered_truncate(struct inode *inode, loff_t new_size) { @@ -2392,7 +2396,7 @@ static int __mpage_da_writepage(struct page *page, * We need to try to allocate * unmapped blocks in the same page. 
* Otherwise we won't make progress - * with the page in ext4_da_writepage + * with the page in ext4_writepage */ if (ext4_bh_delay_or_unwritten(NULL, bh)) { mpage_add_bh_to_extent(mpd, logical, @@ -2519,13 +2523,47 @@ static int noalloc_get_block_write(struct inode *inode, sector_t iblock, } /* + * Note that we don't need to start a transaction unless we're journaling data + * because we should have holes filled from ext4_page_mkwrite(). We even don't + * need to file the inode to the transaction's list in ordered mode because if + * we are writing back data added by write(), the inode is already there and if + * we are writing back data modified via mmap(), noone guarantees in which + * transaction the data will hit the disk. In case we are journaling data, we + * cannot start transaction directly because transaction start ranks above page + * lock so we have to do some magic. + * * This function can get called via... * - ext4_da_writepages after taking page lock (have journal handle) * - journal_submit_inode_data_buffers (no journal handle) * - shrink_page_list via pdflush (no journal handle) * - grab_page_cache when doing write_begin (have journal handle) + * + * We don't do any block allocation in this function. If we have page with + * multiple blocks we need to write those buffer_heads that are mapped. This + * is important for mmaped based write. So if we do with blocksize 1K + * truncate(f, 1024); + * a = mmap(f, 0, 4096); + * a[0] = 'a'; + * truncate(f, 4096); + * we have in the page first buffer_head mapped via page_mkwrite call back + * but other bufer_heads would be unmapped but dirty(dirty done via the + * do_wp_page). So writepage should write the first block. If we modify + * the mmap area beyond 1024 we will again get a page_fault and the + * page_mkwrite callback will do the block allocation and mark the + * buffer_heads mapped. + * + * We redirty the page if we have any buffer_heads that is either delay or + * unwritten in the page. + * + * We can get recursively called as show below. + * + * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> + * ext4_writepage() + * + * But since we don't do any block allocation we should not deadlock. + * Page also have the dirty flag cleared so we don't get recurive page_lock. */ -static int ext4_da_writepage(struct page *page, +static int ext4_writepage(struct page *page, struct writeback_control *wbc) { int ret = 0; @@ -2534,7 +2572,7 @@ static int ext4_da_writepage(struct page *page, struct buffer_head *page_bufs; struct inode *inode = page->mapping->host; - trace_ext4_da_writepage(inode, page); + trace_ext4_writepage(inode, page); size = i_size_read(inode); if (page->index == size >> PAGE_CACHE_SHIFT) len = size & ~PAGE_CACHE_MASK; @@ -2596,6 +2634,15 @@ static int ext4_da_writepage(struct page *page, block_commit_write(page, 0, len); } + if (PageChecked(page) && ext4_should_journal_data(inode)) { + /* + * It's mmapped pagecache. Add buffers and journal it. There + * doesn't seem much point in redirtying the page here. + */ + ClearPageChecked(page); + return __ext4_journalled_writepage(page, wbc, len); + } + if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) ret = nobh_writepage(page, noalloc_get_block_write, wbc); else @@ -3135,112 +3182,10 @@ static int bput_one(handle_t *handle, struct buffer_head *bh) return 0; } -/* - * Note that we don't need to start a transaction unless we're journaling data - * because we should have holes filled from ext4_page_mkwrite(). 
We even don't - * need to file the inode to the transaction's list in ordered mode because if - * we are writing back data added by write(), the inode is already there and if - * we are writing back data modified via mmap(), noone guarantees in which - * transaction the data will hit the disk. In case we are journaling data, we - * cannot start transaction directly because transaction start ranks above page - * lock so we have to do some magic. - * - * In all journaling modes block_write_full_page() will start the I/O. - * - * Problem: - * - * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> - * ext4_writepage() - * - * Similar for: - * - * ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ... - * - * Same applies to ext4_get_block(). We will deadlock on various things like - * lock_journal and i_data_sem - * - * Setting PF_MEMALLOC here doesn't work - too many internal memory - * allocations fail. - * - * 16May01: If we're reentered then journal_current_handle() will be - * non-zero. We simply *return*. - * - * 1 July 2001: @@@ FIXME: - * In journalled data mode, a data buffer may be metadata against the - * current transaction. But the same file is part of a shared mapping - * and someone does a writepage() on it. - * - * We will move the buffer onto the async_data list, but *after* it has - * been dirtied. So there's a small window where we have dirty data on - * BJ_Metadata. - * - * Note that this only applies to the last partial page in the file. The - * bit which block_write_full_page() uses prepare/commit for. (That's - * broken code anyway: it's wrong for msync()). - * - * It's a rare case: affects the final partial page, for journalled data - * where the file is subject to bith write() and writepage() in the same - * transction. To fix it we'll need a custom block_write_full_page(). - * We'll probably need that anyway for journalling writepage() output. - * - * We don't honour synchronous mounts for writepage(). That would be - * disastrous. Any write() or metadata operation will sync the fs for - * us. - * - */ -static int __ext4_normal_writepage(struct page *page, - struct writeback_control *wbc) -{ - struct inode *inode = page->mapping->host; - - if (test_opt(inode->i_sb, NOBH)) - return nobh_writepage(page, noalloc_get_block_write, wbc); - else - return block_write_full_page(page, noalloc_get_block_write, - wbc); -} - -static int ext4_normal_writepage(struct page *page, - struct writeback_control *wbc) -{ - struct inode *inode = page->mapping->host; - loff_t size = i_size_read(inode); - loff_t len; - - trace_ext4_normal_writepage(inode, page); - J_ASSERT(PageLocked(page)); - if (page->index == size >> PAGE_CACHE_SHIFT) - len = size & ~PAGE_CACHE_MASK; - else - len = PAGE_CACHE_SIZE; - - if (page_has_buffers(page)) { - /* if page has buffers it should all be mapped - * and allocated. If there are not buffers attached - * to the page we know the page is dirty but it lost - * buffers. That means that at some moment in time - * after write_begin() / write_end() has been called - * all buffers have been clean and thus they must have been - * written at least once. So they are all mapped and we can - * happily proceed with mapping them and writing the page. 
- */ - BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, - ext4_bh_delay_or_unwritten)); - } - - if (!ext4_journal_current_handle()) - return __ext4_normal_writepage(page, wbc); - - redirty_page_for_writepage(wbc, page); - unlock_page(page); - return 0; -} - static int __ext4_journalled_writepage(struct page *page, - struct writeback_control *wbc) + struct writeback_control *wbc, + unsigned int len) { - loff_t size; - unsigned int len; struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; struct buffer_head *page_bufs; @@ -3248,16 +3193,8 @@ static int __ext4_journalled_writepage(struct page *page, int ret = 0; int err; - size = i_size_read(inode); - if (page->index == size >> PAGE_CACHE_SHIFT) - len = size & ~PAGE_CACHE_MASK; - else - len = PAGE_CACHE_SIZE; - ret = block_prepare_write(page, 0, len, noalloc_get_block_write); - if (ret != 0) - goto out_unlock; - page_bufs = page_buffers(page); + BUG_ON(!page_bufs); walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one); /* As soon as we unlock the page, it can go away, but we have * references to buffers so we are safe */ @@ -3282,67 +3219,10 @@ static int __ext4_journalled_writepage(struct page *page, walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; - goto out; - -out_unlock: - unlock_page(page); out: return ret; } -static int ext4_journalled_writepage(struct page *page, - struct writeback_control *wbc) -{ - struct inode *inode = page->mapping->host; - loff_t size = i_size_read(inode); - loff_t len; - - trace_ext4_journalled_writepage(inode, page); - J_ASSERT(PageLocked(page)); - if (page->index == size >> PAGE_CACHE_SHIFT) - len = size & ~PAGE_CACHE_MASK; - else - len = PAGE_CACHE_SIZE; - - if (page_has_buffers(page)) { - /* if page has buffers it should all be mapped - * and allocated. If there are not buffers attached - * to the page we know the page is dirty but it lost - * buffers. That means that at some moment in time - * after write_begin() / write_end() has been called - * all buffers have been clean and thus they must have been - * written at least once. So they are all mapped and we can - * happily proceed with mapping them and writing the page. - */ - BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, - ext4_bh_delay_or_unwritten)); - } - - if (ext4_journal_current_handle()) - goto no_write; - - if (PageChecked(page)) { - /* - * It's mmapped pagecache. Add buffers and journal it. There - * doesn't seem much point in redirtying the page here. - */ - ClearPageChecked(page); - return __ext4_journalled_writepage(page, wbc); - } else { - /* - * It may be a page full of checkpoint-mode buffers. We don't - * really know unless we go poke around in the buffer_heads. - * But block_write_full_page will do the right thing. 
- */ - return block_write_full_page(page, noalloc_get_block_write, - wbc); - } -no_write: - redirty_page_for_writepage(wbc, page); - unlock_page(page); - return 0; -} - static int ext4_readpage(struct file *file, struct page *page) { return mpage_readpage(page, ext4_get_block); @@ -3489,7 +3369,7 @@ static int ext4_journalled_set_page_dirty(struct page *page) static const struct address_space_operations ext4_ordered_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, - .writepage = ext4_normal_writepage, + .writepage = ext4_writepage, .sync_page = block_sync_page, .write_begin = ext4_write_begin, .write_end = ext4_ordered_write_end, @@ -3504,7 +3384,7 @@ static const struct address_space_operations ext4_ordered_aops = { static const struct address_space_operations ext4_writeback_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, - .writepage = ext4_normal_writepage, + .writepage = ext4_writepage, .sync_page = block_sync_page, .write_begin = ext4_write_begin, .write_end = ext4_writeback_write_end, @@ -3519,7 +3399,7 @@ static const struct address_space_operations ext4_writeback_aops = { static const struct address_space_operations ext4_journalled_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, - .writepage = ext4_journalled_writepage, + .writepage = ext4_writepage, .sync_page = block_sync_page, .write_begin = ext4_write_begin, .write_end = ext4_journalled_write_end, @@ -3533,7 +3413,7 @@ static const struct address_space_operations ext4_journalled_aops = { static const struct address_space_operations ext4_da_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, - .writepage = ext4_da_writepage, + .writepage = ext4_writepage, .writepages = ext4_da_writepages, .sync_page = block_sync_page, .write_begin = ext4_da_write_begin, -- cgit v1.2.3 From 62e086be5d2abef8cad854bc5707329ad345f2ec Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Sun, 14 Jun 2009 17:59:34 -0400 Subject: ext4: Move __ext4_journalled_writepage() to avoid forward declaration In addition, fix two unused variable warnings. 
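The mechanics are plain C scoping; a minimal illustration with hypothetical names, not the ext4 functions themselves:

	/*
	 * Before the move, helper() was defined below its caller, which
	 * forced a forward declaration near the top of the file:
	 *
	 *	static int helper(int x);
	 *
	 * Defining it above its only caller makes the declaration
	 * unnecessary, with no change in behaviour:
	 */
	static int helper(int x)
	{
		return x * 2;
	}

	static int caller(int x)
	{
		return helper(x) + 1;
	}
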
Signed-off-by: Aneesh Kumar K.V Signed-off-by: "Theodore Ts'o" --- fs/ext4/inode.c | 112 +++++++++++++++++++++++++++----------------------------- 1 file changed, 54 insertions(+), 58 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 97c48b5b057..c98e3afea30 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -47,10 +47,6 @@ #define MPAGE_DA_EXTENT_TAIL 0x01 -static int __ext4_journalled_writepage(struct page *page, - struct writeback_control *wbc, - unsigned int len); - static inline int ext4_begin_ordered_truncate(struct inode *inode, loff_t new_size) { @@ -2522,6 +2518,59 @@ static int noalloc_get_block_write(struct inode *inode, sector_t iblock, return ret; } +static int bget_one(handle_t *handle, struct buffer_head *bh) +{ + get_bh(bh); + return 0; +} + +static int bput_one(handle_t *handle, struct buffer_head *bh) +{ + put_bh(bh); + return 0; +} + +static int __ext4_journalled_writepage(struct page *page, + struct writeback_control *wbc, + unsigned int len) +{ + struct address_space *mapping = page->mapping; + struct inode *inode = mapping->host; + struct buffer_head *page_bufs; + handle_t *handle = NULL; + int ret = 0; + int err; + + page_bufs = page_buffers(page); + BUG_ON(!page_bufs); + walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one); + /* As soon as we unlock the page, it can go away, but we have + * references to buffers so we are safe */ + unlock_page(page); + + handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + goto out; + } + + ret = walk_page_buffers(handle, page_bufs, 0, len, NULL, + do_journal_get_write_access); + + err = walk_page_buffers(handle, page_bufs, 0, len, NULL, + write_end_fn); + if (ret == 0) + ret = err; + err = ext4_journal_stop(handle); + if (!ret) + ret = err; + + walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); + EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; +out: + return ret; +} + /* * Note that we don't need to start a transaction unless we're journaling data * because we should have holes filled from ext4_page_mkwrite(). We even don't @@ -2564,7 +2613,7 @@ static int noalloc_get_block_write(struct inode *inode, sector_t iblock, * Page also have the dirty flag cleared so we don't get recurive page_lock. 
*/ static int ext4_writepage(struct page *page, - struct writeback_control *wbc) + struct writeback_control *wbc) { int ret = 0; loff_t size; @@ -3170,59 +3219,6 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block) return generic_block_bmap(mapping, block, ext4_get_block); } -static int bget_one(handle_t *handle, struct buffer_head *bh) -{ - get_bh(bh); - return 0; -} - -static int bput_one(handle_t *handle, struct buffer_head *bh) -{ - put_bh(bh); - return 0; -} - -static int __ext4_journalled_writepage(struct page *page, - struct writeback_control *wbc, - unsigned int len) -{ - struct address_space *mapping = page->mapping; - struct inode *inode = mapping->host; - struct buffer_head *page_bufs; - handle_t *handle = NULL; - int ret = 0; - int err; - - page_bufs = page_buffers(page); - BUG_ON(!page_bufs); - walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one); - /* As soon as we unlock the page, it can go away, but we have - * references to buffers so we are safe */ - unlock_page(page); - - handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - goto out; - } - - ret = walk_page_buffers(handle, page_bufs, 0, len, NULL, - do_journal_get_write_access); - - err = walk_page_buffers(handle, page_bufs, 0, len, NULL, - write_end_fn); - if (ret == 0) - ret = err; - err = ext4_journal_stop(handle); - if (!ret) - ret = err; - - walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); - EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; -out: - return ret; -} - static int ext4_readpage(struct file *file, struct page *page) { return mpage_readpage(page, ext4_get_block); -- cgit v1.2.3 From a566a6b11c86147fe9fc9db7ab15f9eecca3e862 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Mon, 15 Jun 2009 08:26:48 +0100 Subject: dlm: Fix uninitialised variable warning in lock.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CC [M] fs/dlm/lock.o fs/dlm/lock.c: In function ‘find_rsb’: fs/dlm/lock.c:438: warning: ‘r’ may be used uninitialized in this function Since r is used on the error path to set r_ret, set it to NULL. Signed-off-by: Steven Whitehouse Signed-off-by: David Teigland --- fs/dlm/lock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 205ec95b347..eb507c453c5 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -435,7 +435,7 @@ static int search_rsb(struct dlm_ls *ls, char *name, int len, int b, static int find_rsb(struct dlm_ls *ls, char *name, int namelen, unsigned int flags, struct dlm_rsb **r_ret) { - struct dlm_rsb *r, *tmp; + struct dlm_rsb *r = NULL, *tmp; uint32_t hash, bucket; int error = -EINVAL; -- cgit v1.2.3 From c78a87d0a1fc885dfdbe21fd5e07787691dfb068 Mon Sep 17 00:00:00 2001 From: David Teigland Date: Thu, 18 Jun 2009 13:20:24 -0500 Subject: dlm: fix plock use-after-free Fix a regression from the original addition of nfs lock support 586759f03e2e9031ac5589912a51a909ed53c30a. When a synchronous (non-nfs) plock completes, the waiting thread will wake up and free the op struct. This races with the user thread in dev_write() which goes on to read the op's callback field to check if the lock is async and needs a callback. This check can happen on the freed op. The fix is to note the callback value before the op can be freed. 
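Condensed, the fixed dev_write() logic reads the flag while the op is still safe to touch (a paraphrase of the hunk that follows):

	spin_lock(&ops_lock);
	/* ... on finding the matching op on recv_list ... */
	list_del_init(&op->list);
	if (xop->callback)
		do_callback = 1;	/* note this while 'op' is still ours */
	else
		op->done = 1;		/* after this, the waiter may free 'op' */
	spin_unlock(&ops_lock);

	if (found) {
		if (do_callback)
			dlm_plock_callback(op);	/* async: callback path frees op */
		else
			wake_up(&recv_wq);	/* sync: waiter wakes and frees op */
	}
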
Signed-off-by: David Teigland --- fs/dlm/plock.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c index 894a32d438d..16f682e26c0 100644 --- a/fs/dlm/plock.c +++ b/fs/dlm/plock.c @@ -353,7 +353,7 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count, { struct dlm_plock_info info; struct plock_op *op; - int found = 0; + int found = 0, do_callback = 0; if (count != sizeof(info)) return -EINVAL; @@ -366,21 +366,24 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count, spin_lock(&ops_lock); list_for_each_entry(op, &recv_list, list) { - if (op->info.fsid == info.fsid && op->info.number == info.number && + if (op->info.fsid == info.fsid && + op->info.number == info.number && op->info.owner == info.owner) { + struct plock_xop *xop = (struct plock_xop *)op; list_del_init(&op->list); - found = 1; - op->done = 1; memcpy(&op->info, &info, sizeof(info)); + if (xop->callback) + do_callback = 1; + else + op->done = 1; + found = 1; break; } } spin_unlock(&ops_lock); if (found) { - struct plock_xop *xop; - xop = (struct plock_xop *)op; - if (xop->callback) + if (do_callback) dlm_plock_callback(op); else wake_up(&recv_wq); -- cgit v1.2.3 From b76a3f93d01fc93a87cb6eba4e854ffe378b4bac Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Mon, 8 Jun 2009 19:28:41 +0300 Subject: exofs: Fix bio leak in error handling path (sync read) When failing a read request in the sync path, called from write_begin, I forgot to free the allocated bio, fix it. Signed-off-by: Boaz Harrosh --- fs/exofs/inode.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index 77d0a295eb1..bb5d6ed0f7a 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c @@ -295,6 +295,9 @@ static int read_exec(struct page_collect *pcol, bool is_sync) err: if (!is_sync) _unlock_pcol_pages(pcol, ret, READ); + else /* Pages unlocked by caller in sync mode only free bio */ + pcol_free(pcol); + kfree(pcol_copy); if (or) osd_end_request(or); -- cgit v1.2.3 From 27d2e1491985e95c486d991302e399f5c584b4eb Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Sun, 14 Jun 2009 17:23:09 +0300 Subject: exofs: Remove IBM copyrights Boaz, Congrats on getting all the OSD stuff into 2.6.30! I just pulled the git, and saw that the IBM copyrights are still there. Please remove them from all files: * Copyright (C) 2005, 2006 * International Business Machines IBM has revoked all rights on the code - they gave it to me. Thanks! 
Avishay Signed-off-by: Avishay Traeger Signed-off-by: Boaz Harrosh --- fs/exofs/common.h | 4 +--- fs/exofs/dir.c | 4 +--- fs/exofs/exofs.h | 4 +--- fs/exofs/file.c | 4 +--- fs/exofs/inode.c | 4 +--- fs/exofs/namei.c | 4 +--- fs/exofs/osd.c | 4 +--- fs/exofs/super.c | 4 +--- fs/exofs/symlink.c | 4 +--- 9 files changed, 9 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/exofs/common.h b/fs/exofs/common.h index 24667eedc02..c6718e4817f 100644 --- a/fs/exofs/common.h +++ b/fs/exofs/common.h @@ -2,9 +2,7 @@ * common.h - Common definitions for both Kernel and user-mode utilities * * Copyright (C) 2005, 2006 - * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) - * Copyright (C) 2005, 2006 - * International Business Machines + * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 * Boaz Harrosh * diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c index 65b0c8c776a..4cfab1cc75c 100644 --- a/fs/exofs/dir.c +++ b/fs/exofs/dir.c @@ -1,8 +1,6 @@ /* * Copyright (C) 2005, 2006 - * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) - * Copyright (C) 2005, 2006 - * International Business Machines + * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 * Boaz Harrosh * diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h index 0fd4c785967..c413b74ecf3 100644 --- a/fs/exofs/exofs.h +++ b/fs/exofs/exofs.h @@ -1,8 +1,6 @@ /* * Copyright (C) 2005, 2006 - * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) - * Copyright (C) 2005, 2006 - * International Business Machines + * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 * Boaz Harrosh * diff --git a/fs/exofs/file.c b/fs/exofs/file.c index 6ed7fe48475..c6810038d63 100644 --- a/fs/exofs/file.c +++ b/fs/exofs/file.c @@ -1,8 +1,6 @@ /* * Copyright (C) 2005, 2006 - * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) - * Copyright (C) 2005, 2006 - * International Business Machines + * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 * Boaz Harrosh * diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index bb5d6ed0f7a..6c10f747669 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c @@ -1,8 +1,6 @@ /* * Copyright (C) 2005, 2006 - * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) - * Copyright (C) 2005, 2006 - * International Business Machines + * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 * Boaz Harrosh * diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c index 77fdd765e76..b7dd0c23686 100644 --- a/fs/exofs/namei.c +++ b/fs/exofs/namei.c @@ -1,8 +1,6 @@ /* * Copyright (C) 2005, 2006 - * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) - * Copyright (C) 2005, 2006 - * International Business Machines + * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 * Boaz Harrosh * diff --git a/fs/exofs/osd.c b/fs/exofs/osd.c index b3d2ccb87aa..4372542df28 100644 --- a/fs/exofs/osd.c +++ b/fs/exofs/osd.c @@ -1,8 +1,6 @@ /* * Copyright (C) 2005, 2006 - * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) - * Copyright (C) 2005, 2006 - * International Business Machines + * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 * Boaz Harrosh * diff --git a/fs/exofs/super.c b/fs/exofs/super.c index 8216c5b77b5..e47b38e55a2 100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c @@ -1,8 +1,6 @@ /* * Copyright (C) 2005, 2006 - * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) - * Copyright (C) 2005, 2006 - * International Business Machines + * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 * Boaz Harrosh * diff --git 
a/fs/exofs/symlink.c b/fs/exofs/symlink.c index 36e2d7bc7f7..4dd687c3e74 100644 --- a/fs/exofs/symlink.c +++ b/fs/exofs/symlink.c @@ -1,8 +1,6 @@ /* * Copyright (C) 2005, 2006 - * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com) - * Copyright (C) 2005, 2006 - * International Business Machines + * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 * Boaz Harrosh * -- cgit v1.2.3 From baaf94cdc7fe1c61e3c660a3b055724fd9d0a034 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Sun, 14 Jun 2009 16:52:10 +0300 Subject: exofs: Avoid using file_fsync() The use of file_fsync() in exofs_file_sync() is not necessary since it does some extra stuff not used by exofs. Open code just the parts that are currently needed. TODO: Further optimization can be done to sync the sb only on inode updates of new files; usually the sb update is not needed in exofs. Signed-off-by: Boaz Harrosh --- fs/exofs/exofs.h | 3 +++ fs/exofs/file.c | 17 ++++++++++++----- fs/exofs/super.c | 2 +- 3 files changed, 16 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h index c413b74ecf3..5ec72e020b2 100644 --- a/fs/exofs/exofs.h +++ b/fs/exofs/exofs.h @@ -154,6 +154,9 @@ ino_t exofs_parent_ino(struct dentry *child); int exofs_set_link(struct inode *, struct exofs_dir_entry *, struct page *, struct inode *); +/* super.c */ +int exofs_sync_fs(struct super_block *sb, int wait); + /********************* * operation vectors * *********************/ diff --git a/fs/exofs/file.c b/fs/exofs/file.c index c6810038d63..839b9dc1e70 100644 --- a/fs/exofs/file.c +++ b/fs/exofs/file.c @@ -45,16 +45,23 @@ static int exofs_file_fsync(struct file *filp, struct dentry *dentry, { int ret; struct address_space *mapping = filp->f_mapping; + struct inode *inode = dentry->d_inode; + struct super_block *sb; ret = filemap_write_and_wait(mapping); if (ret) return ret; - /*Note: file_fsync below also calles sync_blockdev, which is a no-op * for exofs, but other then that it does sync_inode and * sync_superblock which is what we need here. */ - return file_fsync(filp, dentry, datasync); + /* sync the inode attributes */ + ret = write_inode_now(inode, 1); + + /* This is a good place to write the sb */ + /* TODO: Sechedule an sb-sync on create */ + sb = inode->i_sb; + if (sb->s_dirt) + exofs_sync_fs(sb, 1); + + return ret; } static int exofs_flush(struct file *file, fl_owner_t id) diff --git a/fs/exofs/super.c b/fs/exofs/super.c index e47b38e55a2..a343b4ea62f 100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c @@ -198,7 +198,7 @@ static const struct export_operations exofs_export_ops; /* * Write the superblock to the OSD */ -static int exofs_sync_fs(struct super_block *sb, int wait) +int exofs_sync_fs(struct super_block *sb, int wait) { struct exofs_sb_info *sbi; struct exofs_fscb *fscb; -- cgit v1.2.3 From cc0bad7552308e8905d6ea56e6b7811fa67e716d Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 25 Jun 2009 00:56:52 -0400 Subject: cifs: add new cifs_iget function and convert unix codepath to use it In order to unify some codepaths, introduce a common cifs_fattr struct for storing inode attributes. The different codepaths (unix, legacy, normal, etc...) can fill out this struct with inode info. It can then be passed as an arg to a common set of routines to get and update inodes. Add a new cifs_iget function that uses iget5_locked to identify inodes.
This will compare inodes based on the uniqueid value in a cifs_fattr struct. Rather than filling out an already-created inode, have cifs_get_inode_info_unix instead fill out cifs_fattr and hand that off to cifs_iget. cifs_iget can then properly look for hardlinked inodes. On the readdir side, add a new cifs_readdir_lookup function that spawns populated dentries. Redefine FILE_UNIX_INFO so that it's basically a FILE_UNIX_BASIC_INFO that has a few fields wrapped around it. This allows us to more easily use the same function for filling out the fattr as the non-readdir codepath. With this, we should then have proper hardlink detection and can eventually get rid of some nasty CIFS-specific hacks for handing them. Signed-off-by: Jeff Layton Reviewed-by: Christoph Hellwig Signed-off-by: Steve French --- fs/cifs/cifsfs.h | 13 ++ fs/cifs/cifsglob.h | 25 ++++ fs/cifs/cifspdu.h | 14 +- fs/cifs/cifsproto.h | 9 +- fs/cifs/dir.c | 22 +-- fs/cifs/inode.c | 380 ++++++++++++++++++++++++++-------------------------- fs/cifs/readdir.c | 253 +++++++++++++--------------------- 7 files changed, 339 insertions(+), 377 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 9570a0e8023..586df24c9ab 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -24,6 +24,19 @@ #define ROOT_I 2 +/* + * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down + * so that it will fit. + */ +static inline ino_t +cifs_uniqueid_to_ino_t(u64 fileid) +{ + ino_t ino = (ino_t) fileid; + if (sizeof(ino_t) < sizeof(u64)) + ino ^= fileid >> (sizeof(u64)-sizeof(ino_t)) * 8; + return ino; +} + extern struct file_system_type cifs_fs_type; extern const struct address_space_operations cifs_addr_ops; extern const struct address_space_operations cifs_addr_ops_smallbuf; diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index e1225e6ded2..e6435cba811 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -371,6 +371,7 @@ struct cifsInodeInfo { bool oplockPending:1; bool delete_pending:1; /* DELETE_ON_CLOSE is set */ u64 server_eof; /* current file size on server */ + u64 uniqueid; /* server inode number */ struct inode vfs_inode; }; @@ -472,6 +473,30 @@ struct dfs_info3_param { char *node_name; }; +/* + * common struct for holding inode info when searching for or updating an + * inode with new info + */ + +#define CIFS_FATTR_DFS_REFERRAL 0x1 + +struct cifs_fattr { + u32 cf_flags; + u32 cf_cifsattrs; + u64 cf_uniqueid; + u64 cf_eof; + u64 cf_bytes; + uid_t cf_uid; + gid_t cf_gid; + umode_t cf_mode; + dev_t cf_rdev; + unsigned int cf_nlink; + unsigned int cf_dtype; + struct timespec cf_atime; + struct timespec cf_mtime; + struct timespec cf_ctime; +}; + static inline void free_dfs_info_param(struct dfs_info3_param *param) { if (param) { diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index a785f69dbc9..2d07f890a84 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h @@ -2328,19 +2328,7 @@ struct file_attrib_tag { typedef struct { __le32 NextEntryOffset; __u32 ResumeKey; /* as with FileIndex - no need to convert */ - __le64 EndOfFile; - __le64 NumOfBytes; - __le64 LastStatusChange; /*SNIA specs DCE time for the 3 time fields */ - __le64 LastAccessTime; - __le64 LastModificationTime; - __le64 Uid; - __le64 Gid; - __le32 Type; - __le64 DevMajor; - __le64 DevMinor; - __le64 UniqueId; - __le64 Permissions; - __le64 Nlinks; + FILE_UNIX_BASIC_INFO basic; char FileName[1]; } __attribute__((packed)) FILE_UNIX_INFO; /* level 0x202 */ diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 
c419416a42e..b2bd83fd2aa 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -98,9 +98,14 @@ extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, extern int cifs_posix_open(char *full_path, struct inode **pinode, struct super_block *sb, int mode, int oflags, int *poplock, __u16 *pnetfid, int xid); -extern void posix_fill_in_inode(struct inode *tmp_inode, - FILE_UNIX_BASIC_INFO *pData, int isNewInode); +extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, + FILE_UNIX_BASIC_INFO *info, + struct cifs_sb_info *cifs_sb); +extern void cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr); extern struct inode *cifs_new_inode(struct super_block *sb, __u64 *inum); +extern struct inode *cifs_iget(struct super_block *sb, + struct cifs_fattr *fattr); + extern int cifs_get_inode_info(struct inode **pinode, const unsigned char *search_path, FILE_ALL_INFO *pfile_info, diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 7dc6b74f9de..a40054faed7 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -188,6 +188,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode, FILE_UNIX_BASIC_INFO *presp_data; __u32 posix_flags = 0; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); + struct cifs_fattr fattr; cFYI(1, ("posix open %s", full_path)); @@ -236,22 +237,21 @@ int cifs_posix_open(char *full_path, struct inode **pinode, if (presp_data->Type == cpu_to_le32(-1)) goto posix_open_ret; /* open ok, caller does qpathinfo */ - /* get new inode and set it up */ if (!pinode) goto posix_open_ret; /* caller does not need info */ + cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb); + + /* get new inode and set it up */ if (*pinode == NULL) { - __u64 unique_id = le64_to_cpu(presp_data->UniqueId); - *pinode = cifs_new_inode(sb, &unique_id); + *pinode = cifs_iget(sb, &fattr); + if (!*pinode) { + rc = -ENOMEM; + goto posix_open_ret; + } + } else { + cifs_fattr_to_inode(*pinode, &fattr); } - /* else an inode was passed in. 
Update its info, don't create one */ - - /* We do not need to close the file if new_inode fails since - the caller will retry qpathinfo as long as inode is null */ - if (*pinode == NULL) - goto posix_open_ret; - - posix_fill_in_inode(*pinode, presp_data, 1); cifs_fill_fileinfo(*pinode, *pnetfid, cifs_sb->tcon, write_only); diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 155c9e785d0..b22379610d7 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -77,127 +77,146 @@ static void cifs_set_ops(struct inode *inode, const bool is_dfs_referral) } } -static void cifs_unix_info_to_inode(struct inode *inode, - FILE_UNIX_BASIC_INFO *info, int force_uid_gid) +/* populate an inode with info from a cifs_fattr struct */ +void +cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) { - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); - struct cifsInodeInfo *cifsInfo = CIFS_I(inode); - __u64 num_of_bytes = le64_to_cpu(info->NumOfBytes); - __u64 end_of_file = le64_to_cpu(info->EndOfFile); + struct cifsInodeInfo *cifs_i = CIFS_I(inode); + unsigned long now = jiffies; + + inode->i_atime = fattr->cf_atime; + inode->i_mtime = fattr->cf_mtime; + inode->i_ctime = fattr->cf_ctime; + inode->i_mode = fattr->cf_mode; + inode->i_rdev = fattr->cf_rdev; + inode->i_nlink = fattr->cf_nlink; + inode->i_uid = fattr->cf_uid; + inode->i_gid = fattr->cf_gid; + + cifs_i->cifsAttrs = fattr->cf_cifsattrs; + cifs_i->uniqueid = fattr->cf_uniqueid; + + cFYI(1, ("inode 0x%p old_time=%ld new_time=%ld", inode, + cifs_i->time, now)); + cifs_i->time = now; + + /* + * Can't safely change the file size here if the client is writing to + * it due to potential races. + */ + spin_lock(&inode->i_lock); + if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) { + i_size_write(inode, fattr->cf_eof); + + /* + * i_blocks is not related to (i_size / i_blksize), + * but instead 512 byte (2**9) size is required for + * calculating num blocks. + */ + inode->i_blocks = (512 - 1 + fattr->cf_bytes) >> 9; + } + spin_unlock(&inode->i_lock); + + cifs_set_ops(inode, fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL); +} + +/* Fill a cifs_fattr struct with info from FILE_UNIX_BASIC_INFO. */ +void +cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info, + struct cifs_sb_info *cifs_sb) +{ + memset(fattr, 0, sizeof(*fattr)); + fattr->cf_uniqueid = le64_to_cpu(info->UniqueId); + fattr->cf_bytes = le64_to_cpu(info->NumOfBytes); + fattr->cf_eof = le64_to_cpu(info->EndOfFile); - inode->i_atime = cifs_NTtimeToUnix(info->LastAccessTime); - inode->i_mtime = - cifs_NTtimeToUnix(info->LastModificationTime); - inode->i_ctime = cifs_NTtimeToUnix(info->LastStatusChange); - inode->i_mode = le64_to_cpu(info->Permissions); + fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime); + fattr->cf_mtime = cifs_NTtimeToUnix(info->LastModificationTime); + fattr->cf_ctime = cifs_NTtimeToUnix(info->LastStatusChange); + fattr->cf_mode = le64_to_cpu(info->Permissions); /* * Since we set the inode type below we need to mask off * to avoid strange results if bits set above. 
*/ - inode->i_mode &= ~S_IFMT; + fattr->cf_mode &= ~S_IFMT; switch (le32_to_cpu(info->Type)) { case UNIX_FILE: - inode->i_mode |= S_IFREG; + fattr->cf_mode |= S_IFREG; + fattr->cf_dtype = DT_REG; break; case UNIX_SYMLINK: - inode->i_mode |= S_IFLNK; + fattr->cf_mode |= S_IFLNK; + fattr->cf_dtype = DT_LNK; break; case UNIX_DIR: - inode->i_mode |= S_IFDIR; + fattr->cf_mode |= S_IFDIR; + fattr->cf_dtype = DT_DIR; break; case UNIX_CHARDEV: - inode->i_mode |= S_IFCHR; - inode->i_rdev = MKDEV(le64_to_cpu(info->DevMajor), - le64_to_cpu(info->DevMinor) & MINORMASK); + fattr->cf_mode |= S_IFCHR; + fattr->cf_dtype = DT_CHR; + fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor), + le64_to_cpu(info->DevMinor) & MINORMASK); break; case UNIX_BLOCKDEV: - inode->i_mode |= S_IFBLK; - inode->i_rdev = MKDEV(le64_to_cpu(info->DevMajor), - le64_to_cpu(info->DevMinor) & MINORMASK); + fattr->cf_mode |= S_IFBLK; + fattr->cf_dtype = DT_BLK; + fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor), + le64_to_cpu(info->DevMinor) & MINORMASK); break; case UNIX_FIFO: - inode->i_mode |= S_IFIFO; + fattr->cf_mode |= S_IFIFO; + fattr->cf_dtype = DT_FIFO; break; case UNIX_SOCKET: - inode->i_mode |= S_IFSOCK; + fattr->cf_mode |= S_IFSOCK; + fattr->cf_dtype = DT_SOCK; break; default: /* safest to call it a file if we do not know */ - inode->i_mode |= S_IFREG; + fattr->cf_mode |= S_IFREG; + fattr->cf_dtype = DT_REG; cFYI(1, ("unknown type %d", le32_to_cpu(info->Type))); break; } - if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) && - !force_uid_gid) - inode->i_uid = cifs_sb->mnt_uid; + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) + fattr->cf_uid = cifs_sb->mnt_uid; else - inode->i_uid = le64_to_cpu(info->Uid); + fattr->cf_uid = le64_to_cpu(info->Uid); - if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) && - !force_uid_gid) - inode->i_gid = cifs_sb->mnt_gid; + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) + fattr->cf_gid = cifs_sb->mnt_gid; else - inode->i_gid = le64_to_cpu(info->Gid); + fattr->cf_gid = le64_to_cpu(info->Gid); - inode->i_nlink = le64_to_cpu(info->Nlinks); - - cifsInfo->server_eof = end_of_file; - spin_lock(&inode->i_lock); - if (is_size_safe_to_change(cifsInfo, end_of_file)) { - /* - * We can not safely change the file size here if the client - * is writing to it due to potential races. - */ - i_size_write(inode, end_of_file); - - /* - * i_blocks is not related to (i_size / i_blksize), - * but instead 512 byte (2**9) size is required for - * calculating num blocks. - */ - inode->i_blocks = (512 - 1 + num_of_bytes) >> 9; - } - spin_unlock(&inode->i_lock); + fattr->cf_nlink = le64_to_cpu(info->Nlinks); } - /* - * Needed to setup inode data for the directory which is the - * junction to the new submount (ie to setup the fake directory - * which represents a DFS referral) + * Fill a cifs_fattr struct with fake inode info. + * + * Needed to setup cifs_fattr data for the directory which is the + * junction to the new submount (ie to setup the fake directory + * which represents a DFS referral). 
*/ -static void fill_fake_finddataunix(FILE_UNIX_BASIC_INFO *pfnd_dat, - struct super_block *sb) +void +cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb) { - struct inode *pinode = NULL; - - memset(pfnd_dat, 0, sizeof(FILE_UNIX_BASIC_INFO)); + struct cifs_sb_info *cifs_sb = CIFS_SB(sb); -/* __le64 pfnd_dat->EndOfFile = cpu_to_le64(0); - __le64 pfnd_dat->NumOfBytes = cpu_to_le64(0); - __u64 UniqueId = 0; */ - pfnd_dat->LastStatusChange = - cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); - pfnd_dat->LastAccessTime = - cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); - pfnd_dat->LastModificationTime = - cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); - pfnd_dat->Type = cpu_to_le32(UNIX_DIR); - pfnd_dat->Permissions = cpu_to_le64(S_IXUGO | S_IRWXU); - pfnd_dat->Nlinks = cpu_to_le64(2); - if (sb->s_root) - pinode = sb->s_root->d_inode; - if (pinode == NULL) - return; - - /* fill in default values for the remaining based on root - inode since we can not query the server for this inode info */ - pfnd_dat->DevMajor = cpu_to_le64(MAJOR(pinode->i_rdev)); - pfnd_dat->DevMinor = cpu_to_le64(MINOR(pinode->i_rdev)); - pfnd_dat->Uid = cpu_to_le64(pinode->i_uid); - pfnd_dat->Gid = cpu_to_le64(pinode->i_gid); + cFYI(1, ("creating fake fattr for DFS referral")); + + memset(fattr, 0, sizeof(*fattr)); + fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU; + fattr->cf_uid = cifs_sb->mnt_uid; + fattr->cf_gid = cifs_sb->mnt_gid; + fattr->cf_atime = CURRENT_TIME; + fattr->cf_ctime = CURRENT_TIME; + fattr->cf_mtime = CURRENT_TIME; + fattr->cf_nlink = 2; + fattr->cf_flags |= CIFS_FATTR_DFS_REFERRAL; } /** @@ -244,66 +263,42 @@ cifs_new_inode(struct super_block *sb, __u64 *inum) } int cifs_get_inode_info_unix(struct inode **pinode, - const unsigned char *full_path, struct super_block *sb, int xid) + const unsigned char *full_path, + struct super_block *sb, int xid) { - int rc = 0; + int rc; FILE_UNIX_BASIC_INFO find_data; - struct cifsTconInfo *pTcon; - struct inode *inode; + struct cifs_fattr fattr; + struct cifsTconInfo *tcon; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); - bool is_dfs_referral = false; - struct cifsInodeInfo *cifsInfo; - __u64 num_of_bytes; - __u64 end_of_file; - pTcon = cifs_sb->tcon; + tcon = cifs_sb->tcon; cFYI(1, ("Getting info on %s", full_path)); /* could have done a find first instead but this returns more info */ - rc = CIFSSMBUnixQPathInfo(xid, pTcon, full_path, &find_data, + rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); - if (rc == -EREMOTE && !is_dfs_referral) { - is_dfs_referral = true; - cFYI(DBG2, ("DFS ref")); - /* for DFS, server does not give us real inode data */ - fill_fake_finddataunix(&find_data, sb); - rc = 0; - } else if (rc) - goto cgiiu_exit; - num_of_bytes = le64_to_cpu(find_data.NumOfBytes); - end_of_file = le64_to_cpu(find_data.EndOfFile); + if (!rc) { + cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb); + } else if (rc == -EREMOTE) { + cifs_create_dfs_fattr(&fattr, sb); + rc = 0; + } else { + return rc; + } - /* get new inode */ if (*pinode == NULL) { - __u64 unique_id = le64_to_cpu(find_data.UniqueId); - *pinode = cifs_new_inode(sb, &unique_id); - if (*pinode == NULL) { + /* get new inode */ + *pinode = cifs_iget(sb, &fattr); + if (!*pinode) rc = -ENOMEM; - goto cgiiu_exit; - } + } else { + /* we already have inode, update it */ + cifs_fattr_to_inode(*pinode, &fattr); } - inode = *pinode; - cifsInfo = CIFS_I(inode); - - cFYI(1, ("Old time %ld", cifsInfo->time)); - 
cifsInfo->time = jiffies; - cFYI(1, ("New time %ld", cifsInfo->time)); - /* this is ok to set on every inode revalidate */ - atomic_set(&cifsInfo->inUse, 1); - - cifs_unix_info_to_inode(inode, &find_data, 0); - - if (num_of_bytes < end_of_file) - cFYI(1, ("allocation size less than end of file")); - cFYI(1, ("Size %ld and blocks %llu", - (unsigned long) inode->i_size, - (unsigned long long)inode->i_blocks)); - - cifs_set_ops(inode, is_dfs_referral); -cgiiu_exit: return rc; } @@ -695,33 +690,85 @@ char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb) return full_path; } +static int +cifs_find_inode(struct inode *inode, void *opaque) +{ + struct cifs_fattr *fattr = (struct cifs_fattr *) opaque; + + if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid) + return 0; + + return 1; +} + +static int +cifs_init_inode(struct inode *inode, void *opaque) +{ + struct cifs_fattr *fattr = (struct cifs_fattr *) opaque; + + CIFS_I(inode)->uniqueid = fattr->cf_uniqueid; + return 0; +} + +/* Given fattrs, get a corresponding inode */ +struct inode * +cifs_iget(struct super_block *sb, struct cifs_fattr *fattr) +{ + unsigned long hash; + struct inode *inode; + + cFYI(1, ("looking for uniqueid=%llu", fattr->cf_uniqueid)); + + /* hash down to 32-bits on 32-bit arch */ + hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid); + + inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr); + + /* we have fattrs in hand, update the inode */ + if (inode) { + cifs_fattr_to_inode(inode, fattr); + if (sb->s_flags & MS_NOATIME) + inode->i_flags |= S_NOATIME | S_NOCMTIME; + if (inode->i_state & I_NEW) { + inode->i_ino = hash; + unlock_new_inode(inode); + } + } + + return inode; +} + /* gets root inode */ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino) { int xid; struct cifs_sb_info *cifs_sb; - struct inode *inode; + struct inode *inode = NULL; long rc; char *full_path; - inode = iget_locked(sb, ino); - if (!inode) - return ERR_PTR(-ENOMEM); - if (!(inode->i_state & I_NEW)) - return inode; - - cifs_sb = CIFS_SB(inode->i_sb); + cifs_sb = CIFS_SB(sb); full_path = cifs_build_path_to_root(cifs_sb); if (full_path == NULL) return ERR_PTR(-ENOMEM); xid = GetXid(); - if (cifs_sb->tcon->unix_ext) - rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb, - xid); - else + if (cifs_sb->tcon->unix_ext) { + rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid); + if (!inode) + return ERR_PTR(-ENOMEM); + } else { + inode = iget_locked(sb, ino); + if (!inode) + return ERR_PTR(-ENOMEM); + if (!(inode->i_state & I_NEW)) + return inode; + rc = cifs_get_inode_info(&inode, full_path, NULL, inode->i_sb, xid, NULL); + unlock_new_inode(inode); + } + if (rc && cifs_sb->tcon->ipc) { cFYI(1, ("ipc connection - fake read inode")); inode->i_mode |= S_IFDIR; @@ -737,7 +784,6 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino) return ERR_PTR(rc); } - unlock_new_inode(inode); kfree(full_path); /* can not call macro FreeXid here since in a void func @@ -1063,44 +1109,6 @@ out_reval: return rc; } -void posix_fill_in_inode(struct inode *tmp_inode, - FILE_UNIX_BASIC_INFO *pData, int isNewInode) -{ - struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode); - loff_t local_size; - struct timespec local_mtime; - - cifsInfo->time = jiffies; - atomic_inc(&cifsInfo->inUse); - - /* save mtime and size */ - local_mtime = tmp_inode->i_mtime; - local_size = tmp_inode->i_size; - - cifs_unix_info_to_inode(tmp_inode, pData, 1); - cifs_set_ops(tmp_inode, false); - - if (!S_ISREG(tmp_inode->i_mode)) - return; - 
- /* - * No sense invalidating pages for new inode - * since we we have not started caching - * readahead file data yet. - */ - if (isNewInode) - return; - - if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) && - (local_size == tmp_inode->i_size)) { - cFYI(1, ("inode exists but unchanged")); - } else { - /* file may have changed on server */ - cFYI(1, ("invalidate inode, readdir detected change")); - invalidate_remote_inode(tmp_inode); - } -} - int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) { int rc = 0, tmprc; @@ -1109,6 +1117,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) struct cifsTconInfo *pTcon; char *full_path = NULL; struct inode *newinode = NULL; + struct cifs_fattr fattr; cFYI(1, ("In cifs_mkdir, mode = 0x%x inode = 0x%p", mode, inode)); @@ -1148,7 +1157,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) cFYI(1, ("posix mkdir returned 0x%x", rc)); d_drop(direntry); } else { - __u64 unique_id; if (pInfo->Type == cpu_to_le32(-1)) { /* no return info, go query for it */ kfree(pInfo); @@ -1162,20 +1170,15 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) else direntry->d_op = &cifs_dentry_ops; - unique_id = le64_to_cpu(pInfo->UniqueId); - newinode = cifs_new_inode(inode->i_sb, &unique_id); - if (newinode == NULL) { + cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb); + newinode = cifs_iget(inode->i_sb, &fattr); + if (!newinode) { kfree(pInfo); goto mkdir_get_info; } - newinode->i_nlink = 2; d_instantiate(direntry, newinode); - /* we already checked in POSIXCreate whether - frame was long enough */ - posix_fill_in_inode(direntry->d_inode, - pInfo, 1 /* NewInode */); #ifdef CONFIG_CIFS_DEBUG2 cFYI(1, ("instantiated dentry %p %s to inode %p", direntry, direntry->d_name.name, newinode)); @@ -1622,6 +1625,7 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry, if (!err) { generic_fillattr(dentry->d_inode, stat); stat->blksize = CIFS_MAX_MSGSIZE; + stat->ino = CIFS_I(dentry->d_inode)->uniqueid; } return err; } diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 86d0055dc52..231aa6953f8 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -63,6 +63,55 @@ static inline void dump_cifs_file_struct(struct file *file, char *label) } #endif /* DEBUG2 */ +/* + * Find the dentry that matches "name". If there isn't one, create one. If it's + * a negative dentry or the uniqueid changed, then drop it and recreate it. + */ +static struct dentry * +cifs_readdir_lookup(struct dentry *parent, struct qstr *name, + struct cifs_fattr *fattr) +{ + struct dentry *dentry, *alias; + struct inode *inode; + struct super_block *sb = parent->d_inode->i_sb; + + cFYI(1, ("For %s", name->name)); + + dentry = d_lookup(parent, name); + if (dentry) { + /* FIXME: check for inode number changes? 
*/ + if (dentry->d_inode != NULL) + return dentry; + d_drop(dentry); + dput(dentry); + } + + dentry = d_alloc(parent, name); + if (dentry == NULL) + return NULL; + + inode = cifs_iget(sb, fattr); + if (!inode) { + dput(dentry); + return NULL; + } + + if (CIFS_SB(sb)->tcon->nocase) + dentry->d_op = &cifs_ci_dentry_ops; + else + dentry->d_op = &cifs_dentry_ops; + + alias = d_materialise_unique(dentry, inode); + if (alias != NULL) { + dput(dentry); + if (IS_ERR(alias)) + return NULL; + dentry = alias; + } + + return dentry; +} + /* Returns 1 if new inode created, 2 if both dentry and inode were */ /* Might check in the future if inode number changed so we can rehash inode */ static int @@ -76,7 +125,6 @@ construct_dentry(struct qstr *qstring, struct file *file, cFYI(1, ("For %s", qstring->name)); - qstring->hash = full_name_hash(qstring->name, qstring->len); tmp_dentry = d_lookup(file->f_path.dentry, qstring); if (tmp_dentry) { /* BB: overwrite old name? i.e. tmp_dentry->d_name and @@ -299,140 +347,6 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type, } } -static void unix_fill_in_inode(struct inode *tmp_inode, - FILE_UNIX_INFO *pfindData, unsigned int *pobject_type, int isNewInode) -{ - loff_t local_size; - struct timespec local_mtime; - - struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode); - struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb); - - __u32 type = le32_to_cpu(pfindData->Type); - __u64 num_of_bytes = le64_to_cpu(pfindData->NumOfBytes); - __u64 end_of_file = le64_to_cpu(pfindData->EndOfFile); - cifsInfo->time = jiffies; - atomic_inc(&cifsInfo->inUse); - - /* save mtime and size */ - local_mtime = tmp_inode->i_mtime; - local_size = tmp_inode->i_size; - - tmp_inode->i_atime = - cifs_NTtimeToUnix(pfindData->LastAccessTime); - tmp_inode->i_mtime = - cifs_NTtimeToUnix(pfindData->LastModificationTime); - tmp_inode->i_ctime = - cifs_NTtimeToUnix(pfindData->LastStatusChange); - - tmp_inode->i_mode = le64_to_cpu(pfindData->Permissions); - /* since we set the inode type below we need to mask off type - to avoid strange results if bits above were corrupt */ - tmp_inode->i_mode &= ~S_IFMT; - if (type == UNIX_FILE) { - *pobject_type = DT_REG; - tmp_inode->i_mode |= S_IFREG; - } else if (type == UNIX_SYMLINK) { - *pobject_type = DT_LNK; - tmp_inode->i_mode |= S_IFLNK; - } else if (type == UNIX_DIR) { - *pobject_type = DT_DIR; - tmp_inode->i_mode |= S_IFDIR; - } else if (type == UNIX_CHARDEV) { - *pobject_type = DT_CHR; - tmp_inode->i_mode |= S_IFCHR; - tmp_inode->i_rdev = MKDEV(le64_to_cpu(pfindData->DevMajor), - le64_to_cpu(pfindData->DevMinor) & MINORMASK); - } else if (type == UNIX_BLOCKDEV) { - *pobject_type = DT_BLK; - tmp_inode->i_mode |= S_IFBLK; - tmp_inode->i_rdev = MKDEV(le64_to_cpu(pfindData->DevMajor), - le64_to_cpu(pfindData->DevMinor) & MINORMASK); - } else if (type == UNIX_FIFO) { - *pobject_type = DT_FIFO; - tmp_inode->i_mode |= S_IFIFO; - } else if (type == UNIX_SOCKET) { - *pobject_type = DT_SOCK; - tmp_inode->i_mode |= S_IFSOCK; - } else { - /* safest to just call it a file */ - *pobject_type = DT_REG; - tmp_inode->i_mode |= S_IFREG; - cFYI(1, ("unknown inode type %d", type)); - } - - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) - tmp_inode->i_uid = cifs_sb->mnt_uid; - else - tmp_inode->i_uid = le64_to_cpu(pfindData->Uid); - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) - tmp_inode->i_gid = cifs_sb->mnt_gid; - else - tmp_inode->i_gid = le64_to_cpu(pfindData->Gid); - tmp_inode->i_nlink = le64_to_cpu(pfindData->Nlinks); - - 
cifsInfo->server_eof = end_of_file; - spin_lock(&tmp_inode->i_lock); - if (is_size_safe_to_change(cifsInfo, end_of_file)) { - /* can not safely change the file size here if the - client is writing to it due to potential races */ - i_size_write(tmp_inode, end_of_file); - - /* 512 bytes (2**9) is the fake blocksize that must be used */ - /* for this calculation, not the real blocksize */ - tmp_inode->i_blocks = (512 - 1 + num_of_bytes) >> 9; - } - spin_unlock(&tmp_inode->i_lock); - - if (S_ISREG(tmp_inode->i_mode)) { - cFYI(1, ("File inode")); - tmp_inode->i_op = &cifs_file_inode_ops; - - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) - tmp_inode->i_fop = &cifs_file_direct_nobrl_ops; - else - tmp_inode->i_fop = &cifs_file_direct_ops; - } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) - tmp_inode->i_fop = &cifs_file_nobrl_ops; - else - tmp_inode->i_fop = &cifs_file_ops; - - if ((cifs_sb->tcon) && (cifs_sb->tcon->ses) && - (cifs_sb->tcon->ses->server->maxBuf < - PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)) - tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf; - else - tmp_inode->i_data.a_ops = &cifs_addr_ops; - - if (isNewInode) - return; /* No sense invalidating pages for new inode - since we have not started caching readahead - file data for it yet */ - - if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) && - (local_size == tmp_inode->i_size)) { - cFYI(1, ("inode exists but unchanged")); - } else { - /* file may have changed on server */ - cFYI(1, ("invalidate inode, readdir detected change")); - invalidate_remote_inode(tmp_inode); - } - } else if (S_ISDIR(tmp_inode->i_mode)) { - cFYI(1, ("Directory inode")); - tmp_inode->i_op = &cifs_dir_inode_ops; - tmp_inode->i_fop = &cifs_dir_ops; - } else if (S_ISLNK(tmp_inode->i_mode)) { - cFYI(1, ("Symbolic Link inode")); - tmp_inode->i_op = &cifs_symlink_inode_ops; -/* tmp_inode->i_fop = *//* do not need to set to anything */ - } else { - cFYI(1, ("Special inode")); - init_special_inode(tmp_inode, tmp_inode->i_mode, - tmp_inode->i_rdev); - } -} - /* BB eventually need to add the following helper function to resolve NT_STATUS_STOPPED_ON_SYMLINK return code when we try to do FindFirst on (NTFS) directory symlinks */ @@ -872,7 +786,7 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst, len = strnlen(filename, PATH_MAX); } - *pinum = le64_to_cpu(pFindData->UniqueId); + *pinum = le64_to_cpu(pFindData->basic.UniqueId); } else if (level == SMB_FIND_FILE_DIRECTORY_INFO) { FILE_DIRECTORY_INFO *pFindData = (FILE_DIRECTORY_INFO *)current_entry; @@ -934,9 +848,11 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir, struct cifsFileInfo *pCifsF; unsigned int obj_type; __u64 inum; + ino_t ino; struct cifs_sb_info *cifs_sb; struct inode *tmp_inode; struct dentry *tmp_dentry; + struct cifs_fattr fattr; /* get filename and len into qstring */ /* get dentry */ @@ -967,39 +883,49 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir, return rc; /* only these two infolevels return valid inode numbers */ - if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX || - pCifsF->srch_inf.info_level == SMB_FIND_FILE_ID_FULL_DIR_INFO) - rc = construct_dentry(&qstring, file, &tmp_inode, &tmp_dentry, - &inum); - else - rc = construct_dentry(&qstring, file, &tmp_inode, &tmp_dentry, - NULL); + if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX) { + cifs_unix_basic_to_fattr(&fattr, + &((FILE_UNIX_INFO *) pfindEntry)->basic, + cifs_sb); + tmp_dentry = 
cifs_readdir_lookup(file->f_dentry, &qstring, + &fattr); + obj_type = fattr.cf_dtype; + ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid); + } else { + if (pCifsF->srch_inf.info_level == + SMB_FIND_FILE_ID_FULL_DIR_INFO) + rc = construct_dentry(&qstring, file, &tmp_inode, + &tmp_dentry, &inum); + else + rc = construct_dentry(&qstring, file, &tmp_inode, + &tmp_dentry, NULL); - if ((tmp_inode == NULL) || (tmp_dentry == NULL)) - return -ENOMEM; + if ((tmp_inode == NULL) || (tmp_dentry == NULL)) { + rc = -ENOMEM; + goto out; + } - /* we pass in rc below, indicating whether it is a new inode, - so we can figure out whether to invalidate the inode cached - data if the file has changed */ - if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX) - unix_fill_in_inode(tmp_inode, - (FILE_UNIX_INFO *)pfindEntry, - &obj_type, rc); - else if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD) - fill_in_inode(tmp_inode, 0 /* old level 1 buffer type */, - pfindEntry, &obj_type, rc); - else - fill_in_inode(tmp_inode, 1 /* NT */, pfindEntry, &obj_type, rc); + /* we pass in rc below, indicating whether it is a new inode, + * so we can figure out whether to invalidate the inode cached + * data if the file has changed + */ + if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD) + fill_in_inode(tmp_inode, 0, pfindEntry, &obj_type, rc); + else + fill_in_inode(tmp_inode, 1, pfindEntry, &obj_type, rc); - if (rc) /* new inode - needs to be tied to dentry */ { - d_instantiate(tmp_dentry, tmp_inode); - if (rc == 2) - d_rehash(tmp_dentry); - } + /* new inode - needs to be tied to dentry */ + if (rc) { + d_instantiate(tmp_dentry, tmp_inode); + if (rc == 2) + d_rehash(tmp_dentry); + } + ino = cifs_uniqueid_to_ino_t(tmp_inode->i_ino); + } rc = filldir(direntry, qstring.name, qstring.len, file->f_pos, - tmp_inode->i_ino, obj_type); + ino, obj_type); if (rc) { cFYI(1, ("filldir rc = %d", rc)); /* we can not return filldir errors to the caller @@ -1008,6 +934,7 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir, rc = -EOVERFLOW; } +out: dput(tmp_dentry); return rc; } -- cgit v1.2.3 From 0cfae3d8795f388f9de78adb0171520d19da77e9 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Thu, 18 Jun 2009 11:42:53 +0900 Subject: nilfs2: remove unlikely directive causing mis-conversion of error code The following error code handling in nilfs_segctor_write() function wrongly converted negative error codes to a truth value (i.e. 1): err = unlikely(err) ? : res; which originaly meant to be err = err ? : res; This mis-conversion caused that write or sync functions receive the unexpected error code. This fixes the bug by removing the unlikely directive. Signed-off-by: Ryusuke Konishi Cc: stable@kernel.org --- fs/nilfs2/segment.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index aa977549919..c1824915c1c 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1829,8 +1829,8 @@ static int nilfs_segctor_write(struct nilfs_sc_info *sci, err = nilfs_segbuf_write(segbuf, &wi); res = nilfs_segbuf_wait(segbuf, &wi); - err = unlikely(err) ? : res; - if (unlikely(err)) + err = err ? 
: res; + if (err) return err; } return 0; -- cgit v1.2.3 From 8227b29722fdbac72357aae155d171a5c777670c Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Thu, 18 Jun 2009 23:52:23 +0900 Subject: nilfs2: fix hang problem of log writer which occurs after write failures Leandro Lucarella gave me a report that nilfs gets stuck after its write function fails. The problem turned out to be caused by bugs which leave writeback flag on pages. This fixes the problem by ensuring to clear the writeback flag in error path. Reported-by: Leandro Lucarella Signed-off-by: Ryusuke Konishi Cc: stable@kernel.org --- fs/nilfs2/segment.c | 26 ++++++-------------------- 1 file changed, 6 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index c1824915c1c..8b5e4778cf2 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1836,19 +1836,6 @@ static int nilfs_segctor_write(struct nilfs_sc_info *sci, return 0; } -static int nilfs_page_has_uncleared_buffer(struct page *page) -{ - struct buffer_head *head, *bh; - - head = bh = page_buffers(page); - do { - if (buffer_dirty(bh) && !list_empty(&bh->b_assoc_buffers)) - return 1; - bh = bh->b_this_page; - } while (bh != head); - return 0; -} - static void __nilfs_end_page_io(struct page *page, int err) { if (!err) { @@ -1872,12 +1859,11 @@ static void nilfs_end_page_io(struct page *page, int err) if (!page) return; - if (buffer_nilfs_node(page_buffers(page)) && - nilfs_page_has_uncleared_buffer(page)) - /* For b-tree node pages, this function may be called twice - or more because they might be split in a segment. - This check assures that cleanup has been done for all - buffers in a split btnode page. */ + if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) + /* + * For b-tree node pages, this function may be called twice + * or more because they might be split in a segment. + */ return; __nilfs_end_page_io(page, err); @@ -1940,7 +1926,7 @@ static void nilfs_segctor_abort_write(struct nilfs_sc_info *sci, } if (bh->b_page != fs_page) { nilfs_end_page_io(fs_page, err); - if (unlikely(fs_page == failed_page)) + if (fs_page && fs_page == failed_page) goto done; fs_page = bh->b_page; } -- cgit v1.2.3 From 4a52df779700080de4afb0436d9dd9188514a69b Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Thu, 18 Jun 2009 23:53:25 +0900 Subject: nilfs2: fix incorrect KERN_CRIT messages in case of write failures In case of write-failure retries, the following KERN_CRIT level messages are mistakenly output by nilfs_dat_commit_start() function: nilfs_dat_commit_start: vbn = 408463, start = 12506, end = 18446744073709551615, pbn = 530210 nilfs_dat_commit_start: vbn = 408515, start = 12506, end = 18446744073709551615, pbn = 530211 nilfs_dat_commit_start: vbn = 408464, start = 12506, end = 18446744073709551615, pbn = 530212 ... This suppresses these messages. 
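
Stepping back to the nilfs_segctor_write() error-code fix a few hunks above: the following is a minimal user-space sketch (a hypothetical demo, not kernel code) of why wrapping the GNU "a ? : b" idiom in unlikely() turns a negative error code into 1:

#include <stdio.h>

/* Simplified stand-in for the kernel macro: unlikely(x) expands to
 * __builtin_expect(!!(x), 0), so its value is 0 or 1, never x itself. */
#define unlikely(x) __builtin_expect(!!(x), 0)

int main(void)
{
        int err = -5;   /* e.g. a write error from nilfs_segbuf_write() */
        int res = 0;    /* e.g. the result of nilfs_segbuf_wait() */

        /* Buggy form: 'a ? : b' reuses the tested value, which here is
         * !!(err) == 1, so the caller sees 1 instead of -5. */
        int buggy = unlikely(err) ? : res;

        /* Fixed form: the tested value is err itself. */
        int fixed = err ? : res;

        printf("buggy=%d fixed=%d\n", buggy, fixed);    /* buggy=1 fixed=-5 */
        return 0;
}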
Signed-off-by: Ryusuke Konishi Cc: stable@kernel.org --- fs/nilfs2/dat.c | 9 --------- 1 file changed, 9 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index 0b2710e2d56..8927ca27e6f 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c @@ -134,15 +134,6 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req, entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat)); - if (entry->de_blocknr != cpu_to_le64(0) || - entry->de_end != cpu_to_le64(NILFS_CNO_MAX)) { - printk(KERN_CRIT - "%s: vbn = %llu, start = %llu, end = %llu, pbn = %llu\n", - __func__, (unsigned long long)req->pr_entry_nr, - (unsigned long long)le64_to_cpu(entry->de_start), - (unsigned long long)le64_to_cpu(entry->de_end), - (unsigned long long)le64_to_cpu(entry->de_blocknr)); - } entry->de_blocknr = cpu_to_le64(blocknr); kunmap_atomic(kaddr, KM_USER0); -- cgit v1.2.3 From ff54de363afa4583e2a6249f25fe21dfaeb11ea2 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Fri, 19 Jun 2009 02:53:56 +0900 Subject: nilfs2: fix lockdep warning between regular file and inode file This will fix the following false positive of recursive locking which lockdep has detected: ============================================= [ INFO: possible recursive locking detected ] 2.6.30-nilfs #42 --------------------------------------------- nilfs_cleanerd/10607 is trying to acquire lock: (&bmap->b_sem){++++-.}, at: [] nilfs_bmap_lookup_at_level+0x1a/0x74 [nilfs2] but task is already holding lock: (&bmap->b_sem){++++-.}, at: [] nilfs_bmap_truncate+0x19/0x6a [nilfs2] other info that might help us debug this: 2 locks held by nilfs_cleanerd/10607: #0: (&nilfs->ns_segctor_sem){++++.+}, at: [] nilfs_transaction_begin+0xb6/0x10c [nilfs2] #1: (&bmap->b_sem){++++-.}, at: [] nilfs_bmap_truncate+0x19/0x6a [nilfs2] Signed-off-by: Ryusuke Konishi --- fs/nilfs2/bmap.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs') diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index 36df60b6d8a..99d58a028b9 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c @@ -568,6 +568,7 @@ void nilfs_bmap_abort_update_v(struct nilfs_bmap *bmap, } static struct lock_class_key nilfs_bmap_dat_lock_key; +static struct lock_class_key nilfs_bmap_mdt_lock_key; /** * nilfs_bmap_read - read a bmap from an inode @@ -603,7 +604,11 @@ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) bmap->b_ptr_type = NILFS_BMAP_PTR_VS; bmap->b_last_allocated_key = 0; bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; + lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key); break; + case NILFS_IFILE_INO: + lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key); + /* Fall through */ default: bmap->b_ptr_type = NILFS_BMAP_PTR_VM; bmap->b_last_allocated_key = 0; -- cgit v1.2.3 From d9a0a345ab7a58a30ec38e5bb7401a28714914d2 Mon Sep 17 00:00:00 2001 From: Jiro SEKIBA Date: Sat, 4 Jul 2009 23:00:53 +0900 Subject: nilfs2: fix disorder in cp count on error during deleting checkpoints This fixes a bug that checkpoint count gets wrong on errors when deleting a series of checkpoints. The count error is persistent since the checkpoint count is stored on disk. Some userland programs refer to the count via ioctl, and this bugfix is needed to prevent malfunction of such programs. 
Signed-off-by: Jiro SEKIBA Signed-off-by: Ryusuke Konishi Cc: stable@kernel.org --- fs/nilfs2/cpfile.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c index 7d49813f66d..aec942cf79e 100644 --- a/fs/nilfs2/cpfile.c +++ b/fs/nilfs2/cpfile.c @@ -307,7 +307,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); if (ret < 0) { if (ret != -ENOENT) - goto out_header; + break; /* skip hole */ ret = 0; continue; @@ -340,7 +340,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, continue; printk(KERN_ERR "%s: cannot delete block\n", __func__); - goto out_header; + break; } } @@ -358,7 +358,6 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, kunmap_atomic(kaddr, KM_USER0); } - out_header: brelse(header_bh); out_sem: -- cgit v1.2.3 From e3dc5a665d39112e98cfd5bbc7fda2963c00c12c Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Mon, 22 Jun 2009 17:31:09 +0300 Subject: UBIFS: fix integer overflow warning Fix the following warning: fs/ubifs/io.c: In function 'ubifs_wbuf_init': fs/ubifs/io.c:860: warning: integer overflow in expression And limit maximum hrtimer delta to ULONG_MAX because the argument is 'unsigned long'. Signed-off-by: Adrian Hunter Signed-off-by: Artem Bityutskiy --- fs/ubifs/io.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index bc5857199ec..2d41ae1d660 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -857,7 +857,9 @@ int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) * and hard limits. */ hardlimit = ktime_set(DEFAULT_WBUF_TIMEOUT_SECS, 0); - wbuf->delta = (DEFAULT_WBUF_TIMEOUT_SECS * NSEC_PER_SEC) * 2 / 10; + wbuf->delta = DEFAULT_WBUF_TIMEOUT_SECS * 1000000000ULL * 2 / 10; + if (wbuf->delta > ULONG_MAX) + wbuf->delta = ULONG_MAX; wbuf->softlimit = ktime_sub_ns(hardlimit, wbuf->delta); hrtimer_set_expires_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta); -- cgit v1.2.3 From 70aee2f153972f70fad5f7025134fec063f9efbe Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 18 Jun 2009 13:37:15 +0300 Subject: UBIFS: improve debugging messages 1. Make the I/O debugging message print the journal head number. 2. Add prints to timer functions.
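
Returning to the "fix integer overflow warning" hunk above: a user-space sketch of why the original expression overflowed and the ULL suffix cures it (assumes a 32-bit int, as on the build that produced the warning):

#include <stdio.h>

int main(void)
{
        /* 5 * 1000000000 is evaluated in 32-bit int arithmetic and
         * overflows INT_MAX; this is exactly the "integer overflow in
         * expression" gcc warned about, and signed overflow is
         * undefined behaviour, so the result is garbage. */
        long long bad = 5 * 1000000000 * 2 / 10;

        /* The ULL suffix promotes the whole expression to 64 bits. */
        unsigned long long good = 5 * 1000000000ULL * 2 / 10;

        printf("bad=%lld good=%llu\n", bad, good);      /* good=1000000000 */
        return 0;
}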
Signed-off-by: Artem Bityutskiy --- fs/ubifs/io.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 2d41ae1d660..2ef689a9a36 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -297,6 +297,7 @@ static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer) { struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer); + dbg_io("jhead %d", wbuf->jhead); wbuf->need_sync = 1; wbuf->c->need_wbuf_sync = 1; ubifs_wake_up_bgt(wbuf->c); @@ -313,6 +314,9 @@ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) if (!ktime_to_ns(wbuf->softlimit)) return; + dbg_io("set timer for jhead %d, %llu-%llu millisecs", wbuf->jhead, + ktime_to_ns(wbuf->softlimit)/USEC_PER_SEC, + (ktime_to_ns(wbuf->softlimit) + wbuf->delta)/USEC_PER_SEC); hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta, HRTIMER_MODE_REL); } @@ -349,8 +353,8 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf) /* Write-buffer is empty or not seeked */ return 0; - dbg_io("LEB %d:%d, %d bytes", - wbuf->lnum, wbuf->offs, wbuf->used); + dbg_io("LEB %d:%d, %d bytes, jhead %d", + wbuf->lnum, wbuf->offs, wbuf->used, wbuf->jhead); ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY)); ubifs_assert(!(wbuf->avail & 7)); ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size); @@ -399,7 +403,7 @@ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs, { const struct ubifs_info *c = wbuf->c; - dbg_io("LEB %d:%d", lnum, offs); + dbg_io("LEB %d:%d, jhead %d", lnum, offs, wbuf->jhead); ubifs_assert(lnum >= 0 && lnum < c->leb_cnt); ubifs_assert(offs >= 0 && offs <= c->leb_size); ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7)); @@ -506,9 +510,9 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) struct ubifs_info *c = wbuf->c; int err, written, n, aligned_len = ALIGN(len, 8), offs; - dbg_io("%d bytes (%s) to wbuf at LEB %d:%d", len, - dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->lnum, - wbuf->offs + wbuf->used); + dbg_io("%d bytes (%s) to jhead %d wbuf at LEB %d:%d", len, + dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->jhead, + wbuf->lnum, wbuf->offs + wbuf->used); ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt); ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0); ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size); @@ -533,8 +537,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) memcpy(wbuf->buf + wbuf->used, buf, len); if (aligned_len == wbuf->avail) { - dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum, - wbuf->offs); + dbg_io("flush jhead %d wbuf to LEB %d:%d", + wbuf->jhead, wbuf->lnum, wbuf->offs); err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs, c->min_io_size, wbuf->dtype); @@ -562,7 +566,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) * minimal I/O unit. We have to fill and flush write-buffer and switch * to the next min. I/O unit. 
*/ - dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum, wbuf->offs); + dbg_io("flush jhead %d wbuf to LEB %d:%d", + wbuf->jhead, wbuf->lnum, wbuf->offs); memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail); err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs, c->min_io_size, wbuf->dtype); @@ -695,7 +700,8 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len, int err, rlen, overlap; struct ubifs_ch *ch = buf; - dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len); + dbg_io("LEB %d:%d, %s, length %d, jhead %d", lnum, offs, + dbg_ntype(type), len, wbuf->jhead); ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); ubifs_assert(!(offs & 7) && offs < c->leb_size); ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT); -- cgit v1.2.3 From 0b335b9d7d5f0b832e90ac469480789c07be80ad Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 23 Jun 2009 12:30:43 +0300 Subject: UBIFS: slightly optimize write-buffer timer usage This patch adds the following minor optimization: 1. If write-buffer does not use the timer, indicate it with the wbuf->no_timer variable, instead of using the wbuf->softlimit variable. This is better because wbuf->softlimit is of ktime_t type, and the ktime_to_ns function contains 64-bit multiplication. 2. Do not call the 'hrtimer_cancel()' function for write-buffers which do not use timers. 3. Do not cancel the timer in 'ubifs_put_super()' because the synchronization function does this. This patch also removes a confusing comment. Signed-off-by: Artem Bityutskiy --- fs/ubifs/io.c | 9 +++------ fs/ubifs/super.c | 6 ++---- fs/ubifs/ubifs.h | 6 ++++-- 3 files changed, 9 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 2ef689a9a36..9fcf6c38c1b 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -312,7 +312,7 @@ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) { ubifs_assert(!hrtimer_active(&wbuf->timer)); - if (!ktime_to_ns(wbuf->softlimit)) + if (wbuf->no_timer) return; dbg_io("set timer for jhead %d, %llu-%llu millisecs", wbuf->jhead, ktime_to_ns(wbuf->softlimit)/USEC_PER_SEC, @@ -327,11 +327,8 @@ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) */ static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) { - /* - * If the syncer is waiting for the lock (from the background thread's - * context) and another task is changing write-buffer then the syncing - * should be canceled. - */ + if (wbuf->no_timer) + return; wbuf->need_sync = 0; hrtimer_cancel(&wbuf->timer); } diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 79fad43f3c5..5bb272c56a9 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -797,7 +797,7 @@ static int alloc_wbufs(struct ubifs_info *c) * does not need to be synchronized by timer. 
*/ c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM; - c->jheads[GCHD].wbuf.softlimit = ktime_set(0, 0); + c->jheads[GCHD].wbuf.no_timer = 1; return 0; } @@ -1754,10 +1754,8 @@ static void ubifs_put_super(struct super_block *sb) /* Synchronize write-buffers */ if (c->jheads) - for (i = 0; i < c->jhead_cnt; i++) { + for (i = 0; i < c->jhead_cnt; i++) ubifs_wbuf_sync(&c->jheads[i].wbuf); - hrtimer_cancel(&c->jheads[i].wbuf.timer); - } /* * On fatal errors c->ro_media is set to 1, in which case we do diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 1bf01d82006..97bc9d09d54 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -654,7 +654,8 @@ typedef int (*ubifs_lpt_scan_callback)(struct ubifs_info *c, * @delta: hard and soft timeouts delta (the timer expire inteval is @softlimit * and @softlimit + @delta) * @timer: write-buffer timer - * @need_sync: it is set if its timer expired and needs sync + * @no_timer: non-zero if this write-buffer does not timer + * @need_sync: non-zero if its timer expired and needs sync * @next_ino: points to the next position of the following inode number * @inodes: stores the inode numbers of the nodes which are in wbuf * @@ -683,7 +684,8 @@ struct ubifs_wbuf { ktime_t softlimit; unsigned long long delta; struct hrtimer timer; - int need_sync; + unsigned int no_timer:1; + unsigned int need_sync:1; int next_ino; ino_t *inodes; }; -- cgit v1.2.3 From 2a35a3a8ab3e94afd631ed4b45878ceb98f7ab28 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 23 Jun 2009 20:26:33 +0300 Subject: UBIFS: set write-buffer timeout to 3-5 seconds This patch cleans up write-buffer timeout initialization and sets it to a 3-5 second interval. Signed-off-by: Artem Bityutskiy --- fs/ubifs/io.c | 17 ++++------------- fs/ubifs/ubifs.h | 5 +++-- 2 files changed, 7 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 9fcf6c38c1b..48d0af94b26 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -828,7 +828,6 @@ out: int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) { size_t size; - ktime_t hardlimit; wbuf->buf = kmalloc(c->min_io_size, GFP_KERNEL); if (!wbuf->buf) @@ -854,18 +853,10 @@ int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); wbuf->timer.function = wbuf_timer_callback_nolock; - /* - * Make write-buffer soft limit to be 20% of the hard limit. The - * write-buffer timer is allowed to expire any time between the soft - * and hard limits.
- */ - hardlimit = ktime_set(DEFAULT_WBUF_TIMEOUT_SECS, 0); - wbuf->delta = DEFAULT_WBUF_TIMEOUT_SECS * 1000000000ULL * 2 / 10; - if (wbuf->delta > ULONG_MAX) - wbuf->delta = ULONG_MAX; - wbuf->softlimit = ktime_sub_ns(hardlimit, wbuf->delta); - hrtimer_set_expires_range_ns(&wbuf->timer, wbuf->softlimit, - wbuf->delta); + wbuf->softlimit = ktime_set(WBUF_TIMEOUT_SOFTLIMIT, 0); + wbuf->delta = WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT; + wbuf->delta *= 1000000000ULL; + ubifs_assert(wbuf->delta <= ULONG_MAX); return 0; } diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 97bc9d09d54..c3a707d458a 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -95,8 +95,9 @@ */ #define BGT_NAME_PATTERN "ubifs_bgt%d_%d" -/* Default write-buffer synchronization timeout in seconds */ -#define DEFAULT_WBUF_TIMEOUT_SECS 5 +/* Write-buffer synchronization timeout interval in seconds */ +#define WBUF_TIMEOUT_SOFTLIMIT 3 +#define WBUF_TIMEOUT_HARDLIMIT 5 /* Maximum possible inode number (only 32-bit inodes are supported now) */ #define MAX_INUM 0xFFFFFFFF -- cgit v1.2.3 From cb54ef8b1304fe25f3d57031e0f85558a043239f Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 23 Jun 2009 20:30:32 +0300 Subject: UBIFS: few spelling fixes Signed-off-by: Artem Bityutskiy --- fs/ubifs/io.c | 6 +++--- fs/ubifs/super.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 48d0af94b26..7e4267ecfa5 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -391,7 +391,7 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf) * @offs: logical eraseblock offset to seek to * @dtype: data type * - * This function targets the write buffer to logical eraseblock @lnum:@offs. + * This function targets the write-buffer to logical eraseblock @lnum:@offs. * The write-buffer is synchronized if it is not empty. Returns zero in case of * success and a negative error code in case of failure. */ @@ -822,7 +822,7 @@ out: * @c: UBIFS file-system description object * @wbuf: write-buffer to initialize * - * This function initializes write buffer. Returns zero in case of success + * This function initializes write-buffer. Returns zero in case of success * %-ENOMEM in case of failure. */ int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) @@ -862,7 +862,7 @@ int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) /** * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array. - * @wbuf: the write-buffer whereto add + * @wbuf: the write-buffer where to add * @inum: the inode number * * This function adds an inode number to the inode array of the write-buffer. diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 5bb272c56a9..76405c457a3 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -986,7 +986,7 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options, switch (token) { /* * %Opt_fast_unmount and %Opt_norm_unmount options are ignored. - * We accepte them in order to be backware-compatible. But this + * We accept them in order to be backward-compatible. But this * should be removed at some point. 
*/ case Opt_fast_unmount: -- cgit v1.2.3 From 44737589442bf69d811e003d9d0064b8fc1541d6 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 24 Jun 2009 10:15:12 +0300 Subject: UBIFS: fix 64-bit divisions in debug print Signed-off-by: Adrian Hunter --- fs/ubifs/io.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 7e4267ecfa5..762a7d6cec7 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -315,8 +315,9 @@ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) if (wbuf->no_timer) return; dbg_io("set timer for jhead %d, %llu-%llu millisecs", wbuf->jhead, - ktime_to_ns(wbuf->softlimit)/USEC_PER_SEC, - (ktime_to_ns(wbuf->softlimit) + wbuf->delta)/USEC_PER_SEC); + div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC), + div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta, + USEC_PER_SEC)); hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta, HRTIMER_MODE_REL); } -- cgit v1.2.3 From 681947d2fa1a00629de33c4df3ca72c39f06a14c Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 24 Jun 2009 09:59:38 +0300 Subject: UBIFS: minor spelling and grammar fixes Signed-off-by: Adrian Hunter --- fs/ubifs/replay.c | 2 +- fs/ubifs/ubifs.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index 11cc80125a4..769be42f39d 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c @@ -957,7 +957,7 @@ out: return err; out_dump: - ubifs_err("log error detected while replying the log at LEB %d:%d", + ubifs_err("log error detected while replaying the log at LEB %d:%d", lnum, offs + snod->offs); dbg_dump_node(c, snod->node); ubifs_scan_destroy(sleb); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index c3a707d458a..a2934909442 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -655,8 +655,8 @@ typedef int (*ubifs_lpt_scan_callback)(struct ubifs_info *c, * @delta: hard and soft timeouts delta (the timer expire inteval is @softlimit * and @softlimit + @delta) * @timer: write-buffer timer - * @no_timer: non-zero if this write-buffer does not timer - * @need_sync: non-zero if its timer expired and needs sync + * @no_timer: non-zero if this write-buffer does not have a timer + * @need_sync: non-zero if the timer expired and the wbuf needs sync'ing * @next_ino: points to the next position of the following inode number * @inodes: stores the inode numbers of the nodes which are in wbuf * -- cgit v1.2.3 From 1fb8bd01ed0af0d0577e010e8c6b4234de583fa6 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 28 Jun 2009 18:31:58 +0300 Subject: UBIFS: fix assertion warning When debugging is enabled and an unclean file-system is mounted, the following assertion is triggered: UBIFS assert failed in ubifs_tnc_start_commit at 805 (pid 1081) Call Trace: [cfaffbd0] [c0006cf8] show_stack+0x44/0x16c (unreliable) [cfaffc10] [c011b738] ubifs_tnc_start_commit+0xbb8/0xd18 [cfaffc90] [c0112670] do_commit+0x150/0xa44 [cfaffd10] [c0125234] ubifs_rcvry_gc_commit+0xd8/0x544 [cfaffd60] [c0100e9c] ubifs_fill_super+0xe78/0x15f8 [cfaffdf0] [c0102118] ubifs_get_sb+0x20c/0x320 [cfaffe70] [c007f764] vfs_kern_mount+0x58/0xe0 [cfaffe90] [c007f83c] do_kern_mount+0x40/0xf8 [cfaffeb0] [c0095c24] do_mount+0x550/0x758 [cfafff10] [c0095ebc] sys_mount+0x90/0xe0 [cfafff40] [c000ed4c] ret_from_syscall+0x0/0x3c The reason is that we initialize 'c->min_idx_lebs' early, and do not re-calculate it after journal replay.
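
For the 64-bit division fix above: on 32-bit kernels a plain '/' on u64 operands makes gcc emit a call to libgcc's __udivdi3, which the kernel does not link against, so div_u64() must be used instead. A user-space approximation (div_u64 stubbed here; the kernel provides optimized per-arch versions):

#include <stdint.h>
#include <stdio.h>

/* Stub with the kernel helper's shape: u64 dividend, u32 divisor. */
static inline uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
        return dividend / divisor;
}

int main(void)
{
        uint64_t softlimit_ns = 3000000000ULL;  /* 3 s soft limit in ns */
        uint32_t nsec_per_msec = 1000000;

        printf("softlimit = %llu ms\n",
               (unsigned long long)div_u64(softlimit_ns, nsec_per_msec));
        return 0;
}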
Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 76405c457a3..3507d0ed542 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1287,6 +1287,9 @@ static int mount_ubifs(struct ubifs_info *c) if (err) goto out_journal; + /* Calculate 'min_idx_lebs' after journal replay */ + c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); + err = ubifs_mount_orphans(c, c->need_recovery, mounted_read_only); if (err) goto out_orphans;
-- cgit v1.2.3

From 7fcd9c3ecbf09c0a77db7ba01aac75b32fb79a93 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Thu, 2 Jul 2009 17:15:47 +0200 Subject: UBIFS: allow more than one volume to be mounted UBIFS uses a bdi device per volume, but does not care to hand out unique names to each of them. This causes an error when trying to mount more than one volume. Append the UBI volume and device ID to avoid that. [Amended a bit by Artem Bityutskiy] Signed-off-by: Daniel Mack Cc: Artem Bityutskiy Cc: Adrian Hunter Cc: linux-mtd@lists.infradead.org Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 3507d0ed542..26d2e0d8046 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1976,7 +1976,8 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent) err = bdi_init(&c->bdi); if (err) goto out_close; - err = bdi_register(&c->bdi, NULL, "ubifs"); + err = bdi_register(&c->bdi, NULL, "ubifs_%d_%d", + c->vi.ubi_num, c->vi.vol_id); if (err) goto out_bdi;
-- cgit v1.2.3

From 3beab0b42413e83a7907db7176b54c840fc75a81 Mon Sep 17 00:00:00 2001 From: "Zhang, Yanmin" Date: Sun, 5 Jul 2009 12:08:08 -0700 Subject: sys_sync(): fix 16% performance regression in ffsb create_4k test I run many ffsb test cases on JBODs (typically 13/12 disks). Compared with kernel 2.6.30, 2.6.31-rc1 has about a 16% regression with ffsb_create_4k. The sub-test case creates files continuously for 10 minutes and every file is 1MB. Bisecting located the patch below. 5cee5815d1564bbbd505fea86f4550f1efdb5cd0 is first bad commit commit 5cee5815d1564bbbd505fea86f4550f1efdb5cd0 Author: Jan Kara Date: Mon Apr 27 16:43:51 2009 +0200 vfs: Make sys_sync() use fsync_super() (version 4) It is unnecessarily fragile to have two places (fsync_super() and do_sync()) doing data integrity sync of the filesystem. Alter __fsync_super() to accommodate needs of both callers and use it. So after this patch __fsync_super() is the only place where we gather all the calls needed to properly send all data on a filesystem to disk. As a matter of fact, ffsb calls sys_sync at the end to make sure all data is flushed to disks, and the flushing is counted into the result. vmstat shows ffsb is blocked when syncing for a long time. With 2.6.30, ffsb is blocked for a short time. I checked the patch and did experiments to recover the original methods. Eventually, the root cause is that the patch deletes the call to wakeup_pdflush when syncing, so only ffsb is blocked on disk I/O. wakeup_pdflush could ask pdflush to write back pages at the same time as ffsb.
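Putting the hunk below together, the restored syscall reads roughly as follows (a sketch reconstructed from the diff; the laptop_mode branch is truncated in the hunk, so the laptop_sync_completion() call is an assumption):

/* Kick pdflush first so all block queues write back in parallel,
 * then do the non-waiting and waiting sync passes. */
SYSCALL_DEFINE0(sync)
{
	wakeup_pdflush(0);	/* 0 means "as many pages as possible" */
	sync_filesystems(0);	/* start writeout, do not wait */
	sync_filesystems(1);	/* wait for completion */
	if (unlikely(laptop_mode))
		laptop_sync_completion();	/* assumed; not shown in the hunk */
	return 0;
}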
[akpm@linux-foundation.org: restore comment too] Signed-off-by: Zhang Yanmin Cc: Jan Kara Cc: Al Viro Acked-by: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/sync.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs') diff --git a/fs/sync.c b/fs/sync.c index dd200025af8..3422ba61d86 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -112,8 +112,13 @@ restart: mutex_unlock(&mutex); } +/* + * sync everything. Start out by waking pdflush, because that writes back + * all queues in parallel. + */ SYSCALL_DEFINE0(sync) { + wakeup_pdflush(0); sync_filesystems(0); sync_filesystems(1); if (unlikely(laptop_mode))
-- cgit v1.2.3

From 793285fcafce4719a05e0c99fa74b188157fe7fe Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 5 Jul 2009 12:08:26 -0700 Subject: cred_guard_mutex: do not return -EINTR to user-space do_execve() and ptrace_attach() return -EINTR if mutex_lock_interruptible(->cred_guard_mutex) fails. This is not right; change the code to return -ERESTARTNOINTR. Perhaps we should also change proc_pid_attr_write(). Signed-off-by: Oleg Nesterov Cc: David Howells Acked-by: Roland McGrath Cc: James Morris Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/compat.c | 4 ++-- fs/exec.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/compat.c b/fs/compat.c index cdd51a3a7c5..fbadb947727 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -1486,8 +1486,8 @@ int compat_do_execve(char * filename, if (!bprm) goto out_files; - retval = mutex_lock_interruptible(&current->cred_guard_mutex); - if (retval < 0) + retval = -ERESTARTNOINTR; + if (mutex_lock_interruptible(&current->cred_guard_mutex)) goto out_free; current->in_execve = 1; diff --git a/fs/exec.c b/fs/exec.c index e639957d7a5..4a8849e45b2 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1277,8 +1277,8 @@ int do_execve(char * filename, if (!bprm) goto out_files; - retval = mutex_lock_interruptible(&current->cred_guard_mutex); - if (retval < 0) + retval = -ERESTARTNOINTR; + if (mutex_lock_interruptible(&current->cred_guard_mutex)) goto out_free; current->in_execve = 1;
-- cgit v1.2.3

From d01730d74d2b0155da50d44555001706294014f7 Mon Sep 17 00:00:00 2001 From: Jiaying Zhang Date: Tue, 7 Jul 2009 18:15:21 +0200 Subject: quota: Fix possible deadlock during parallel quotaon and quotaoff The following test script triggers a deadlock on ext2 filesystem: while true; do quotaon /dev/hda >&/dev/null; usleep $RANDOM; done & while true; do quotaoff /dev/hda >&/dev/null; usleep $RANDOM; done & I found there is a potential deadlock between quotaon and quotaoff (or quotasync). Basically, all quotactl operations need to be protected by dqonoff_mutex. vfs_quota_off and vfs_quota_sync also call sb->s_op->quota_write, which needs to grab the i_mutex of the quota file. But in vfs_quota_on_inode (called from the quotaon operation), the current code tries to grab the i_mutex of the quota file first, before getting dqonoff_mutex. Reverse the order in which we take locks in vfs_quota_on_inode(). Jan Kara: Changed changelog to be more readable, made lockdep happy with I_MUTEX_QUOTA.
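A minimal sketch of the ordering the patch establishes in vfs_load_quota_inode() (illustration only; the surrounding quota setup is elided):

/* Take dqonoff_mutex first, then the quota file's i_mutex. The
 * I_MUTEX_QUOTA lockdep subclass marks this nesting as intentional,
 * distinct from ordinary inode locking. */
static void quota_on_lock_order(struct inode *inode, struct quota_info *dqopt)
{
	mutex_lock(&dqopt->dqonoff_mutex);
	mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);

	/* ... load and enable quotas on @inode ... */

	mutex_unlock(&inode->i_mutex);
	mutex_unlock(&dqopt->dqonoff_mutex);
}

This is the reverse of the old i_mutex-then-dqonoff_mutex order, which could deadlock against the quota_write() path taking dqonoff_mutex first and the quota file's i_mutex second.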
Signed-off-by: Jiaying Zhang Signed-off-by: Jan Kara --- fs/quota/dquot.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 607c579e5ec..70f36c043d6 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2042,8 +2042,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, * changes */ invalidate_bdev(sb->s_bdev); } - mutex_lock(&inode->i_mutex); mutex_lock(&dqopt->dqonoff_mutex); + mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); if (sb_has_quota_loaded(sb, type)) { error = -EBUSY; goto out_lock; @@ -2094,7 +2094,6 @@ out_file_init: dqopt->files[type] = NULL; iput(inode); out_lock: - mutex_unlock(&dqopt->dqonoff_mutex); if (oldflags != -1) { down_write(&dqopt->dqptr_sem); /* Set the flags back (in the case of accidental quotaon() @@ -2104,6 +2103,7 @@ out_lock: up_write(&dqopt->dqptr_sem); } mutex_unlock(&inode->i_mutex); + mutex_unlock(&dqopt->dqonoff_mutex); out_fmt: put_quota_format(fmt);
-- cgit v1.2.3

From b43f3cbd21ffbd719fd4fa6642bfe6af255ded34 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Wed, 8 Jul 2009 01:54:37 +0400 Subject: headers: mnt_namespace.h redux Fix various silly problems wrt mnt_namespace.h: - exit_mnt_ns() isn't used, remove it - done that, sched.h and nsproxy.h inclusions aren't needed - mount.h inclusion was needed for vfsmount_lock, but no longer - remove mnt_namespace.h inclusion from files which don't use anything from mnt_namespace.h Signed-off-by: Alexey Dobriyan Signed-off-by: Linus Torvalds --- fs/afs/mntpt.c | 1 - fs/namespace.c | 1 + fs/nfs/getroot.c | 1 - fs/reiserfs/super.c | 1 - 4 files changed, 1 insertion(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index c52be53f694..5ffb570cd3a 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -17,7 +17,6 @@ #include #include #include -#include <linux/mnt_namespace.h> #include "internal.h" diff --git a/fs/namespace.c b/fs/namespace.c index 3dc283fd471..277c28a63ea 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -22,6 +22,7 @@ #include #include #include +#include <linux/mnt_namespace.h> #include #include #include diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c index 46177cb8706..b35d2a61606 100644 --- a/fs/nfs/getroot.c +++ b/fs/nfs/getroot.c @@ -30,7 +30,6 @@ #include #include #include -#include <linux/mnt_namespace.h> #include #include diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index d3aeb061612..7adea74d6a8 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -24,7 +24,6 @@ #include #include #include -#include <linux/mnt_namespace.h> #include #include #include
-- cgit v1.2.3

From d5ce5b40bc66880d1732461d4b47d7fc3331ed30 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Wed, 8 Jul 2009 11:17:34 +0100 Subject: Free the memory allocated by memdup_user() in fs/sysfs/bin.c Commit 1c8542c7bb replaced kmalloc() with memdup_user() in the write() function but also dropped the kfree(temp). The memdup_user() function allocates memory which is never freed.
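For reference, the intended memdup_user() pattern looks like this (a sketch; process_data() is a hypothetical consumer, not a function in fs/sysfs/bin.c):

static ssize_t process_data(const char *buf, size_t count);	/* hypothetical */

static ssize_t write_sketch(const char __user *userbuf, size_t count)
{
	char *temp;
	ssize_t ret;

	/* memdup_user() kmalloc()s a buffer and copies from user space;
	 * it returns an ERR_PTR on failure, never NULL */
	temp = memdup_user(userbuf, count);
	if (IS_ERR(temp))
		return PTR_ERR(temp);

	ret = process_data(temp, count);

	kfree(temp);	/* the free that the commit below restores */
	return ret;
}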
Signed-off-by: Catalin Marinas Cc: Parag Warudkar Signed-off-by: Linus Torvalds --- fs/sysfs/bin.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c index 9345806c885..2524714bece 100644 --- a/fs/sysfs/bin.c +++ b/fs/sysfs/bin.c @@ -171,6 +171,7 @@ static ssize_t write(struct file *file, const char __user *userbuf, if (count > 0) *off = offs + count; + kfree(temp); return count; }
-- cgit v1.2.3

From ad361c9884e809340f6daca80d56a9e9c871690a Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 6 Jul 2009 13:05:40 -0700 Subject: Remove multiple KERN_ prefixes from printk formats Commit 5fd29d6ccbc98884569d6f3105aeca70858b3e0f ("printk: clean up handling of log-levels and newlines") changed printk semantics. printk lines with multiple KERN_ prefixes are no longer emitted as before the patch. The log level is now included in the output on each additional use. Remove all uses of multiple KERN_s in formats. Signed-off-by: Joe Perches Signed-off-by: Linus Torvalds --- fs/jffs2/erase.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index a0244740b75..b47679be118 100644 --- a/fs/jffs2/erase.c +++ b/fs/jffs2/erase.c @@ -270,19 +270,21 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, D2({ int i=0; struct jffs2_raw_node_ref *this; - printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG); + printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n"); this = ic->nodes; + printk(KERN_DEBUG); while(this) { - printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this)); + printk(KERN_CONT "0x%08x(%d)->", + ref_offset(this), ref_flags(this)); if (++i == 5) { - printk("\n" KERN_DEBUG); + printk(KERN_DEBUG); i=0; } this = this->next_in_ino; } - printk("\n"); + printk(KERN_CONT "\n"); }); switch (ic->class) {
-- cgit v1.2.3

From 5ddf1e0ff00fd808c048d0b920784828276cc516 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Sun, 5 Jul 2009 11:01:02 -0400 Subject: cifs: fix regression with O_EXCL creates and optimize away lookup cifs: fix regression with O_EXCL creates and optimize away lookup Signed-off-by: Jeff Layton Tested-by: Shirish Pargaonkar CC: Stable Kernel Signed-off-by: Steve French --- fs/cifs/dir.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'fs') diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index a40054faed7..ff55fc6932c 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -643,6 +643,15 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, } } + /* + * O_EXCL: optimize away the lookup, but don't hash the dentry. Let + * the VFS handle the create. + */ + if (nd->flags & LOOKUP_EXCL) { + d_instantiate(direntry, NULL); + return 0; + } + /* can not grab the rename sem here since it would deadlock in the cases (beginning of sys_rename itself) in which we already have the sb rename sem */
-- cgit v1.2.3

From 086b3640c10ab448a6993c4bae1508f496f530c4 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 29 Jun 2009 16:25:33 +0300 Subject: UBIFS: dump a little more in case of corruptions In case of corruptions, dump 8192 bytes instead of 4096. The largest node is 4096+ bytes, so it is better to see a node boundary, which is not always possible when only 4096 bytes are printed.
Signed-off-by: Artem Bityutskiy Reviewed-by: Adrian Hunter --- fs/ubifs/scan.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c index 0ed82479b44..165c14ba1a4 100644 --- a/fs/ubifs/scan.c +++ b/fs/ubifs/scan.c @@ -238,12 +238,12 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs, { int len; - ubifs_err("corrupted data at LEB %d:%d", lnum, offs); + ubifs_err("corruption at LEB %d:%d", lnum, offs); if (dbg_failure_mode) return; len = c->leb_size - offs; - if (len > 4096) - len = 4096; + if (len > 8192) + len = 8192; dbg_err("first %d bytes from LEB %d:%d", len, lnum, offs); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 4, buf, len, 1); }
-- cgit v1.2.3

From ed43f2f06cc1cec7ec2dc235c908530bc8c796eb Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 29 Jun 2009 17:59:23 +0300 Subject: UBIFS: small amendments in the LEB scanning code This patch fixes a few minor things I've spotted while going through the code: 1. Better document return codes 2. If 'ubifs_scan_a_node()' returns something we do not expect, treat this as an error. 3. Try to do recovery only when 'ubifs_scan()' returns %-EUCLEAN, not on any error. 4. If empty space starts at a non-aligned address, print a message. Signed-off-by: Artem Bityutskiy Reviewed-by: Adrian Hunter --- fs/ubifs/recovery.c | 7 ++++--- fs/ubifs/replay.c | 7 ++++--- fs/ubifs/scan.c | 14 +++++++++----- 3 files changed, 17 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index 805605250f1..093a1ecb700 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -543,8 +543,8 @@ static int drop_incomplete_group(struct ubifs_scan_leb *sleb, int *offs) * * This function does a scan of a LEB, but caters for errors that might have * been caused by the unclean unmount from which we are attempting to recover. - * - * This function returns %0 on success and a negative error code on failure. + * Returns %0 in case of success, %-EUCLEAN if an unrecoverable corruption is + * found, and a negative error code in case of failure. */ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf, int grouped) @@ -643,7 +643,8 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, goto corrupted; default: dbg_err("unknown"); - goto corrupted; + err = -EINVAL; + goto error; } } diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index 769be42f39d..2970500f32d 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c @@ -837,9 +837,10 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf) dbg_mnt("replay log LEB %d:%d", lnum, offs); sleb = ubifs_scan(c, lnum, offs, sbuf); - if (IS_ERR(sleb)) { - if (c->need_recovery) - sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf); + if (IS_ERR(sleb)) { + if (PTR_ERR(sleb) != -EUCLEAN || !c->need_recovery) + return PTR_ERR(sleb); + sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf); if (IS_ERR(sleb)) return PTR_ERR(sleb); } diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c index 165c14ba1a4..892ebfee4fe 100644 --- a/fs/ubifs/scan.c +++ b/fs/ubifs/scan.c @@ -256,7 +256,9 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs, * @sbuf: scan buffer (must be c->leb_size) * * This function scans LEB number @lnum and returns complete information about - * its contents.
Returns the scanned information in case of success, + * %-EUCLEAN if the LEB needs recovery, and other negative error codes in case + * of failure. */ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, int offs, void *sbuf) @@ -279,7 +281,6 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, cond_resched(); ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0); - if (ret > 0) { /* Padding bytes or a valid padding node */ offs += ret; @@ -304,7 +305,8 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, goto corrupted; default: dbg_err("unknown"); - goto corrupted; + err = -EINVAL; + goto error; } err = ubifs_add_snod(c, sleb, buf, offs); @@ -317,8 +319,10 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, len -= node_len; } - if (offs % c->min_io_size) - goto corrupted; + if (offs % c->min_io_size) { + ubifs_err("empty space starts at non-aligned offset %d", offs); + goto corrupted; + } ubifs_end_scan(c, sleb, lnum, offs);
-- cgit v1.2.3

From 431102fed3effe4e4e19678830ddab7f05c34bf9 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 29 Jun 2009 18:58:34 +0300 Subject: UBIFS: clean up free space checking recovery.c has an 'is_empty()' helper, and it is better to use this helper instead of re-implementing it in several places. This patch does this and removes some amount of unneeded code. Signed-off-by: Artem Bityutskiy Reviewed-by: Adrian Hunter --- fs/ubifs/recovery.c | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index 093a1ecb700..fe7af9f676b 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -357,11 +357,7 @@ static int is_last_write(const struct ubifs_info *c, void *buf, int offs) empty_offs = ALIGN(offs + 1, c->min_io_size); check_len = c->leb_size - empty_offs; p = buf + empty_offs - offs; - - for (; check_len > 0; check_len--) - if (*p++ != 0xff) - return 0; - return 1; + return is_empty(p, check_len); } /** @@ -814,7 +810,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum, static int recover_head(const struct ubifs_info *c, int lnum, int offs, void *sbuf) { - int len, err, need_clean = 0; + int len, err; if (c->min_io_size > 1) len = c->min_io_size; @@ -828,19 +824,7 @@ static int recover_head(const struct ubifs_info *c, int lnum, int offs, /* Read at the head location and check it is empty flash */ err = ubi_read(c->ubi, lnum, sbuf, offs, len); - if (err) - need_clean = 1; - else { - uint8_t *p = sbuf; - - while (len--) - if (*p++ != 0xff) { - need_clean = 1; - break; - } - } - - if (need_clean) { + if (err || !is_empty(sbuf, len)) { dbg_rcvry("cleaning head at %d:%d", lnum, offs); if (offs == 0) return ubifs_leb_unmap(c, lnum);
-- cgit v1.2.3

From 061125476039a9a998878468a6abe235b1cee347 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 29 Jun 2009 19:27:14 +0300 Subject: UBIFS: fix corruption dump In the 'ubifs_recover_leb()' function, when we find corrupted empty space, we dump 8K starting from the offset where the last node ends. This is OK if the corrupted empty space is somewhere near that offset. But if the corruption is at the far end of the LEB, we will dump all 0xFF bytes and completely ignore the interesting data. This is observed on a PPC ("kilauea") with NOR flash. This patch changes the behavior and teaches UBIFS to print only interesting data. I.e., now we find where corruption starts and start dumping from that offset.
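In sketch form, the new behaviour amounts to the following (assumed usage; the helper names match the diff below, but this is not the literal ubifs_recover_leb() code):

/* Shift the dump window to the first non-0xFF byte so the hex dump
 * shows real data instead of pages of 0xFF padding. */
static void dump_from_corruption(const struct ubifs_info *c, int lnum,
				 int offs, void *buf, int len)
{
	int corruption = first_non_ff(buf, len);

	if (corruption > 0) {		/* the empty-space check failed, so >= 0 */
		buf  += corruption;	/* start at the interesting bytes */
		offs += corruption;
	}
	ubifs_scanned_corruption(c, lnum, offs, buf);
}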
Signed-off-by: Artem Bityutskiy Reviewed-by: Adrian Hunter --- fs/ubifs/recovery.c | 28 ++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index fe7af9f676b..e5f6cf8a115 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -52,6 +52,25 @@ static int is_empty(void *buf, int len) return 1; } +/** + * first_non_ff - find offset of the first non-0xff byte. + * @buf: buffer to search in + * @len: length of buffer + * + * This function returns offset of the first non-0xff byte in @buf or %-1 if + * the buffer contains only 0xff bytes. + */ +static int first_non_ff(void *buf, int len) +{ + uint8_t *p = buf; + int i; + + for (i = 0; i < len; i++) + if (*p++ != 0xff) + return i; + return -1; +} + /** * get_master_node - get the last valid master node allowing for corruption. * @c: UBIFS file-system description object @@ -649,8 +668,13 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, clean_buf(c, &buf, lnum, &offs, &len); need_clean = 1; } else { + int corruption = first_non_ff(buf, len); + - ubifs_err("corrupt empty space at LEB %d:%d", - lnum, offs); + ubifs_err("corrupt empty space LEB %d:%d, corruption " + "starts at %d", lnum, offs, corruption); + /* Make sure we dump interesting non-0xFF data */ + offs = corruption; + buf += corruption; goto corrupted; } }
-- cgit v1.2.3

From c4c1bff64dfff4e6dd0936a0340f56b9284512c8 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 9 Jul 2009 20:02:48 -0400 Subject: cifs: add pid of initiating process to spnego upcall info cifs: add pid of initiating process to spnego upcall info This will allow the upcall to poke in /proc/<pid>/environ and get the value of the $KRB5CCNAME env var for the process. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifs_spnego.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index 4a4581cb2b5..051caecf7d6 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c @@ -86,6 +86,9 @@ struct key_type cifs_spnego_key_type = { /* strlen of ";user=" */ #define USER_KEY_LEN 6 +/* strlen of ";pid=0x" */ +#define PID_KEY_LEN 7 + /* get a key struct with a SPNEGO security blob, suitable for session setup */ struct key * cifs_get_spnego_key(struct cifsSesInfo *sesInfo) @@ -103,7 +106,8 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo) IP_KEY_LEN + INET6_ADDRSTRLEN + MAX_MECH_STR_LEN + UID_KEY_LEN + (sizeof(uid_t) * 2) + - USER_KEY_LEN + strlen(sesInfo->userName) + 1; + USER_KEY_LEN + strlen(sesInfo->userName) + + PID_KEY_LEN + (sizeof(pid_t) * 2) + 1; spnego_key = ERR_PTR(-ENOMEM); description = kzalloc(desc_len, GFP_KERNEL); @@ -141,6 +145,9 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo) dp = description + strlen(description); sprintf(dp, ";user=%s", sesInfo->userName); + dp = description + strlen(description); + sprintf(dp, ";pid=0x%x", current->pid); + cFYI(1, ("key description = %s", description)); spnego_key = request_key(&cifs_spnego_key_type, description, "");
-- cgit v1.2.3

From 01ea95e3b6b16573a491ef98ad63f7a1bdcb504f Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 9 Jul 2009 20:02:49 -0400 Subject: cifs: rename CIFSSMBUnixSetInfo to CIFSSMBUnixSetPathInfo cifs: rename CIFSSMBUnixSetInfo to CIFSSMBUnixSetPathInfo ...in preparation for adding a SET_FILE_INFO variant.
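To illustrate the consumer side of the ";pid=0x..." field added by the spnego patch above, a hypothetical userspace helper could parse the pid from the key description and read that process's environment (sketch; not part of the kernel patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

/* Given the pid from the key description, look up KRB5CCNAME in the
 * initiating process's environment. */
static char *krb5ccname_for_pid(pid_t pid)
{
	char path[64], buf[8192];
	FILE *f;
	size_t n;
	char *p;

	snprintf(path, sizeof(path), "/proc/%d/environ", (int)pid);
	f = fopen(path, "r");
	if (!f)
		return NULL;
	n = fread(buf, 1, sizeof(buf) - 1, f);
	fclose(f);
	buf[n] = '\0';

	/* environ is a series of NUL-terminated KEY=VALUE strings */
	for (p = buf; p < buf + n; p += strlen(p) + 1)
		if (strncmp(p, "KRB5CCNAME=", 11) == 0)
			return strdup(p + 11);
	return NULL;
}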
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsproto.h | 2 +- fs/cifs/cifssmb.c | 6 +++--- fs/cifs/dir.c | 15 ++++++++------- fs/cifs/file.c | 6 +++--- fs/cifs/inode.c | 16 ++++++++-------- 5 files changed, 23 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index b2bd83fd2aa..d95fd427de5 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -220,7 +220,7 @@ struct cifs_unix_set_info_args { dev_t device; }; -extern int CIFSSMBUnixSetInfo(const int xid, struct cifsTconInfo *pTcon, +extern int CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *pTcon, char *fileName, const struct cifs_unix_set_info_args *args, const struct nls_table *nls_codepage, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 61007c62749..1cd01ba0365 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -5075,9 +5075,9 @@ SetAttrLgcyRetry: #endif /* temporarily unneeded SetAttr legacy function */ int -CIFSSMBUnixSetInfo(const int xid, struct cifsTconInfo *tcon, char *fileName, - const struct cifs_unix_set_info_args *args, - const struct nls_table *nls_codepage, int remap) +CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *tcon, char *fileName, + const struct cifs_unix_set_info_args *args, + const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index ff55fc6932c..4326ffd90fa 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -425,9 +425,10 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, args.uid = NO_CHANGE_64; args.gid = NO_CHANGE_64; } - CIFSSMBUnixSetInfo(xid, tcon, full_path, &args, - cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, + cifs_sb->local_nls, + cifs_sb->mnt_cifs_flags & + CIFS_MOUNT_MAP_SPECIAL_CHR); } else { /* BB implement mode setting via Windows security descriptors e.g. 
*/ @@ -515,10 +516,10 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, args.uid = NO_CHANGE_64; args.gid = NO_CHANGE_64; } - rc = CIFSSMBUnixSetInfo(xid, pTcon, full_path, - &args, cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); + rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args, + cifs_sb->local_nls, + cifs_sb->mnt_cifs_flags & + CIFS_MOUNT_MAP_SPECIAL_CHR); if (!rc) { rc = cifs_get_inode_info_unix(&newinode, full_path, diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 97ce4bf89d1..c34b7f8a217 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -448,9 +448,9 @@ int cifs_open(struct inode *inode, struct file *file) .mtime = NO_CHANGE_64, .device = 0, }; - CIFSSMBUnixSetInfo(xid, tcon, full_path, &args, - cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & + CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, + cifs_sb->local_nls, + cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); } } diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index b22379610d7..ad19007ea05 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1241,10 +1241,10 @@ mkdir_get_info: args.uid = NO_CHANGE_64; args.gid = NO_CHANGE_64; } - CIFSSMBUnixSetInfo(xid, pTcon, full_path, &args, - cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); + CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args, + cifs_sb->local_nls, + cifs_sb->mnt_cifs_flags & + CIFS_MOUNT_MAP_SPECIAL_CHR); } else { if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) && (mode & S_IWUGO) == 0) { @@ -1876,10 +1876,10 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) args->ctime = NO_CHANGE_64; args->device = 0; - rc = CIFSSMBUnixSetInfo(xid, pTcon, full_path, args, - cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); + rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args, + cifs_sb->local_nls, + cifs_sb->mnt_cifs_flags & + CIFS_MOUNT_MAP_SPECIAL_CHR); if (!rc) rc = inode_setattr(inode, attrs); -- cgit v1.2.3 From 654cf14ac0a71c56c1f0032140c3403382ca076b Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 9 Jul 2009 20:02:49 -0400 Subject: cifs: make a separate function for filling out FILE_UNIX_BASIC_INFO cifs: make a separate function for filling out FILE_UNIX_BASIC_INFO The SET_FILE_INFO variant will need to do the same thing here. Break this code out into a separate function that both variants can call. 
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifssmb.c | 74 +++++++++++++++++++++++++++++++------------------------ 1 file changed, 42 insertions(+), 32 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 1cd01ba0365..1f3c8a463fc 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -5074,6 +5074,47 @@ SetAttrLgcyRetry: } #endif /* temporarily unneeded SetAttr legacy function */ +static void +cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset, + const struct cifs_unix_set_info_args *args) +{ + u64 mode = args->mode; + + /* + * Samba server ignores set of file size to zero due to bugs in some + * older clients, but we should be precise - we use SetFileSize to + * set file size and do not want to truncate file size to zero + * accidently as happened on one Samba server beta by putting + * zero instead of -1 here + */ + data_offset->EndOfFile = cpu_to_le64(NO_CHANGE_64); + data_offset->NumOfBytes = cpu_to_le64(NO_CHANGE_64); + data_offset->LastStatusChange = cpu_to_le64(args->ctime); + data_offset->LastAccessTime = cpu_to_le64(args->atime); + data_offset->LastModificationTime = cpu_to_le64(args->mtime); + data_offset->Uid = cpu_to_le64(args->uid); + data_offset->Gid = cpu_to_le64(args->gid); + /* better to leave device as zero when it is */ + data_offset->DevMajor = cpu_to_le64(MAJOR(args->device)); + data_offset->DevMinor = cpu_to_le64(MINOR(args->device)); + data_offset->Permissions = cpu_to_le64(mode); + + if (S_ISREG(mode)) + data_offset->Type = cpu_to_le32(UNIX_FILE); + else if (S_ISDIR(mode)) + data_offset->Type = cpu_to_le32(UNIX_DIR); + else if (S_ISLNK(mode)) + data_offset->Type = cpu_to_le32(UNIX_SYMLINK); + else if (S_ISCHR(mode)) + data_offset->Type = cpu_to_le32(UNIX_CHARDEV); + else if (S_ISBLK(mode)) + data_offset->Type = cpu_to_le32(UNIX_BLOCKDEV); + else if (S_ISFIFO(mode)) + data_offset->Type = cpu_to_le32(UNIX_FIFO); + else if (S_ISSOCK(mode)) + data_offset->Type = cpu_to_le32(UNIX_SOCKET); +} + int CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *tcon, char *fileName, const struct cifs_unix_set_info_args *args, @@ -5086,7 +5127,6 @@ CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *tcon, char *fileName, int bytes_returned = 0; FILE_UNIX_BASIC_INFO *data_offset; __u16 params, param_offset, offset, count, byte_count; - __u64 mode = args->mode; cFYI(1, ("In SetUID/GID/Mode")); setPermsRetry: @@ -5137,38 +5177,8 @@ setPermsRetry: pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC); pSMB->Reserved4 = 0; pSMB->hdr.smb_buf_length += byte_count; - /* Samba server ignores set of file size to zero due to bugs in some - older clients, but we should be precise - we use SetFileSize to - set file size and do not want to truncate file size to zero - accidently as happened on one Samba server beta by putting - zero instead of -1 here */ - data_offset->EndOfFile = cpu_to_le64(NO_CHANGE_64); - data_offset->NumOfBytes = cpu_to_le64(NO_CHANGE_64); - data_offset->LastStatusChange = cpu_to_le64(args->ctime); - data_offset->LastAccessTime = cpu_to_le64(args->atime); - data_offset->LastModificationTime = cpu_to_le64(args->mtime); - data_offset->Uid = cpu_to_le64(args->uid); - data_offset->Gid = cpu_to_le64(args->gid); - /* better to leave device as zero when it is */ - data_offset->DevMajor = cpu_to_le64(MAJOR(args->device)); - data_offset->DevMinor = cpu_to_le64(MINOR(args->device)); - data_offset->Permissions = cpu_to_le64(mode); - - if (S_ISREG(mode)) - data_offset->Type = cpu_to_le32(UNIX_FILE); 
- else if (S_ISDIR(mode)) - data_offset->Type = cpu_to_le32(UNIX_DIR); - else if (S_ISLNK(mode)) - data_offset->Type = cpu_to_le32(UNIX_SYMLINK); - else if (S_ISCHR(mode)) - data_offset->Type = cpu_to_le32(UNIX_CHARDEV); - else if (S_ISBLK(mode)) - data_offset->Type = cpu_to_le32(UNIX_BLOCKDEV); - else if (S_ISFIFO(mode)) - data_offset->Type = cpu_to_le32(UNIX_FIFO); - else if (S_ISSOCK(mode)) - data_offset->Type = cpu_to_le32(UNIX_SOCKET); + cifs_fill_unix_set_info(data_offset, args); pSMB->ByteCount = cpu_to_le16(byte_count); rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
-- cgit v1.2.3

From 3bbeeb3c93a961bd01b969dd4395ecac0c09db8d Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 9 Jul 2009 20:02:50 -0400 Subject: cifs: add and use CIFSSMBUnixSetFileInfo for setattr calls cifs: add and use CIFSSMBUnixSetFileInfo for setattr calls When there's an open filehandle, SET_FILE_INFO is apparently preferred over SET_PATH_INFO. Add a new variant that sets a FILE_UNIX_BASIC_INFO infolevel via SET_FILE_INFO and switch cifs_setattr_unix to use the new call when there's an open filehandle available. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsproto.h | 4 ++++ fs/cifs/cifssmb.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/cifs/inode.c | 11 +++++++++- 3 files changed, 77 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index d95fd427de5..37c11c08c52 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -220,6 +220,10 @@ struct cifs_unix_set_info_args { dev_t device; }; +extern int CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon, + const struct cifs_unix_set_info_args *args, + u16 fid, u32 pid_of_opener); + extern int CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *pTcon, char *fileName, const struct cifs_unix_set_info_args *args, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 1f3c8a463fc..922f5fe2084 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -5115,6 +5115,69 @@ cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset, data_offset->Type = cpu_to_le32(UNIX_SOCKET); } +int +CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon, + const struct cifs_unix_set_info_args *args, + u16 fid, u32 pid_of_opener) +{ + struct smb_com_transaction2_sfi_req *pSMB = NULL; + FILE_UNIX_BASIC_INFO *data_offset; + int rc = 0; + u16 params, param_offset, offset, byte_count, count; + + cFYI(1, ("Set Unix Info (via SetFileInfo)")); + rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); + + if (rc) + return rc; + + pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener); + pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16)); + + params = 6; + pSMB->MaxSetupCount = 0; + pSMB->Reserved = 0; + pSMB->Flags = 0; + pSMB->Timeout = 0; + pSMB->Reserved2 = 0; + param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; + offset = param_offset + params; + + data_offset = (FILE_UNIX_BASIC_INFO *) + ((char *)(&pSMB->hdr.Protocol) + offset); + count = sizeof(FILE_UNIX_BASIC_INFO); + + pSMB->MaxParameterCount = cpu_to_le16(2); + /* BB find max SMB PDU from sess */ + pSMB->MaxDataCount = cpu_to_le16(1000); + pSMB->SetupCount = 1; + pSMB->Reserved3 = 0; + pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION); + byte_count = 3 /* pad */ + params + count; + pSMB->DataCount = cpu_to_le16(count); + pSMB->ParameterCount = cpu_to_le16(params); + pSMB->TotalDataCount = pSMB->DataCount; + pSMB->TotalParameterCount =
pSMB->ParameterCount; + pSMB->ParameterOffset = cpu_to_le16(param_offset); + pSMB->DataOffset = cpu_to_le16(offset); + pSMB->Fid = fid; + pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC); + pSMB->Reserved4 = 0; + pSMB->hdr.smb_buf_length += byte_count; + pSMB->ByteCount = cpu_to_le16(byte_count); + + cifs_fill_unix_set_info(data_offset, args); + + rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); + if (rc) + cFYI(1, ("Send error in Set Time (SetFileInfo) = %d", rc)); + + /* Note: On -EAGAIN error only caller can retry on handle based calls + since file handle passed in no longer valid */ + + return rc; +} + int CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *tcon, char *fileName, const struct cifs_unix_set_info_args *args, diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index ad19007ea05..55b616bb381 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1790,6 +1790,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifsTconInfo *pTcon = cifs_sb->tcon; struct cifs_unix_set_info_args *args = NULL; + struct cifsFileInfo *open_file; cFYI(1, ("setattr_unix on file %s attrs->ia_valid=0x%x", direntry->d_name.name, attrs->ia_valid)); @@ -1876,10 +1877,18 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) args->ctime = NO_CHANGE_64; args->device = 0; - rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args, + open_file = find_writable_file(cifsInode); + if (open_file) { + u16 nfid = open_file->netfid; + u32 npid = open_file->pid; + rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args, nfid, npid); + atomic_dec(&open_file->wrtPending); + } else { + rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + } if (!rc) rc = inode_setattr(inode, attrs); -- cgit v1.2.3 From b77863bfa153e886f9f8faf1a791ba57a36efed0 Mon Sep 17 00:00:00 2001 From: Steve French Date: Thu, 9 Jul 2009 22:51:38 +0000 Subject: [CIFS] update cifs version number Signed-off-by: Steve French --- fs/cifs/CHANGES | 6 +++++- fs/cifs/cifsfs.h | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index 3a9b7a58a51..92888aa9074 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES @@ -5,7 +5,11 @@ client generated ones by default (mount option "serverino" turned on by default if server supports it). Add forceuid and forcegid mount options (so that when negotiating unix extensions specifying which uid mounted does not immediately force the server's reported -uids to be overridden). Add support for scope moutn parm. +uids to be overridden). Add support for scope mount parm. Improve +hard link detection to use same inode for both. Do not set +read-only dos attribute on directories (for chmod) since Windows +explorer special cases this attribute bit for directories for +a different purpose. 
Version 1.58 ------------ diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 586df24c9ab..6c170948300 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -113,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* EXPERIMENTAL */ -#define CIFS_VERSION "1.59" +#define CIFS_VERSION "1.60" #endif /* _CIFSFS_H */ -- cgit v1.2.3 From 0b8f18e358384a52c1ed7fa7129c08e7eaf86bb6 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 9 Jul 2009 01:46:37 -0400 Subject: cifs: convert cifs_get_inode_info and non-posix readdir to use cifs_iget cifs: convert cifs_get_inode_info and non-posix readdir to use cifs_iget Rather than allocating an inode and filling it out, have cifs_get_inode_info fill out a cifs_fattr and call cifs_iget. This means a pretty hefty reorganization of cifs_get_inode_info. For the readdir codepath, add a couple of new functions for filling out cifs_fattr's from different FindFile response infolevels. Finally, remove cifs_new_inode since there are no more callers. Signed-off-by: Jeff Layton Reviewed-by: Christoph Hellwig Signed-off-by: Steve French --- fs/cifs/cifsacl.c | 26 ++-- fs/cifs/cifsglob.h | 2 + fs/cifs/cifsproto.h | 6 +- fs/cifs/inode.c | 397 ++++++++++++++++++++-------------------------------- fs/cifs/readdir.c | 350 +++++++++++---------------------------------- 5 files changed, 252 insertions(+), 529 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 1403b5d86a7..6941c22398a 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -327,7 +327,7 @@ static void dump_ace(struct cifs_ace *pace, char *end_of_acl) static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, struct cifs_sid *pownersid, struct cifs_sid *pgrpsid, - struct inode *inode) + struct cifs_fattr *fattr) { int i; int num_aces = 0; @@ -340,7 +340,7 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, if (!pdacl) { /* no DACL in the security descriptor, set all the permissions for user/group/other */ - inode->i_mode |= S_IRWXUGO; + fattr->cf_mode |= S_IRWXUGO; return; } @@ -357,7 +357,7 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, /* reset rwx permissions for user/group/other. Also, if num_aces is 0 i.e. 
DACL has no ACEs, user/group/other have no permissions */ - inode->i_mode &= ~(S_IRWXUGO); + fattr->cf_mode &= ~(S_IRWXUGO); acl_base = (char *)pdacl; acl_size = sizeof(struct cifs_acl); @@ -379,17 +379,17 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, if (compare_sids(&(ppace[i]->sid), pownersid)) access_flags_to_mode(ppace[i]->access_req, ppace[i]->type, - &(inode->i_mode), + &fattr->cf_mode, &user_mask); if (compare_sids(&(ppace[i]->sid), pgrpsid)) access_flags_to_mode(ppace[i]->access_req, ppace[i]->type, - &(inode->i_mode), + &fattr->cf_mode, &group_mask); if (compare_sids(&(ppace[i]->sid), &sid_everyone)) access_flags_to_mode(ppace[i]->access_req, ppace[i]->type, - &(inode->i_mode), + &fattr->cf_mode, &other_mask); /* memcpy((void *)(&(cifscred->aces[i])), @@ -464,7 +464,7 @@ static int parse_sid(struct cifs_sid *psid, char *end_of_acl) /* Convert CIFS ACL to POSIX form */ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len, - struct inode *inode) + struct cifs_fattr *fattr) { int rc; struct cifs_sid *owner_sid_ptr, *group_sid_ptr; @@ -472,7 +472,7 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len, char *end_of_acl = ((char *)pntsd) + acl_len; __u32 dacloffset; - if ((inode == NULL) || (pntsd == NULL)) + if (pntsd == NULL) return -EIO; owner_sid_ptr = (struct cifs_sid *)((char *)pntsd + @@ -497,7 +497,7 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len, if (dacloffset) parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr, - group_sid_ptr, inode); + group_sid_ptr, fattr); else cFYI(1, ("no ACL")); /* BB grant all or default perms? */ @@ -508,7 +508,6 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len, memcpy((void *)(&(cifscred->gsid)), (void *)group_sid_ptr, sizeof(struct cifs_sid)); */ - return 0; } @@ -671,8 +670,9 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen, } /* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */ -void acl_to_uid_mode(struct cifs_sb_info *cifs_sb, struct inode *inode, - const char *path, const __u16 *pfid) +void +cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, + struct inode *inode, const char *path, const __u16 *pfid) { struct cifs_ntsd *pntsd = NULL; u32 acllen = 0; @@ -687,7 +687,7 @@ void acl_to_uid_mode(struct cifs_sb_info *cifs_sb, struct inode *inode, /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */ if (pntsd) - rc = parse_sec_desc(pntsd, acllen, inode); + rc = parse_sec_desc(pntsd, acllen, fattr); if (rc) cFYI(1, ("parse sec desc failed rc = %d", rc)); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index e6435cba811..8bcf5a4bcde 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -479,6 +479,8 @@ struct dfs_info3_param { */ #define CIFS_FATTR_DFS_REFERRAL 0x1 +#define CIFS_FATTR_DELETE_PENDING 0x2 +#define CIFS_FATTR_NEED_REVAL 0x4 struct cifs_fattr { u32 cf_flags; diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 37c11c08c52..da8fbf56599 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -102,7 +102,6 @@ extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info, struct cifs_sb_info *cifs_sb); extern void cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr); -extern struct inode *cifs_new_inode(struct super_block *sb, __u64 *inum); extern struct inode *cifs_iget(struct super_block *sb, struct cifs_fattr *fattr); @@ -113,8 +112,9 @@ extern int cifs_get_inode_info(struct inode **pinode, extern int 
cifs_get_inode_info_unix(struct inode **pinode, const unsigned char *search_path, struct super_block *sb, int xid); -extern void acl_to_uid_mode(struct cifs_sb_info *cifs_sb, struct inode *inode, - const char *path, const __u16 *pfid); +extern void cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, + struct cifs_fattr *fattr, struct inode *inode, + const char *path, const __u16 *pfid); extern int mode_to_acl(struct inode *inode, const char *path, __u64); extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *, diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 55b616bb381..a807397f444 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -82,23 +82,34 @@ void cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) { struct cifsInodeInfo *cifs_i = CIFS_I(inode); - unsigned long now = jiffies; + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); + unsigned long oldtime = cifs_i->time; inode->i_atime = fattr->cf_atime; inode->i_mtime = fattr->cf_mtime; inode->i_ctime = fattr->cf_ctime; - inode->i_mode = fattr->cf_mode; inode->i_rdev = fattr->cf_rdev; inode->i_nlink = fattr->cf_nlink; inode->i_uid = fattr->cf_uid; inode->i_gid = fattr->cf_gid; + /* if dynperm is set, don't clobber existing mode */ + if (inode->i_state & I_NEW || + !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) + inode->i_mode = fattr->cf_mode; + cifs_i->cifsAttrs = fattr->cf_cifsattrs; cifs_i->uniqueid = fattr->cf_uniqueid; + if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL) + cifs_i->time = 0; + else + cifs_i->time = jiffies; + cFYI(1, ("inode 0x%p old_time=%ld new_time=%ld", inode, - cifs_i->time, now)); - cifs_i->time = now; + oldtime, cifs_i->time)); + + cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING; /* * Can't safely change the file size here if the client is writing to @@ -219,49 +230,6 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb) fattr->cf_flags |= CIFS_FATTR_DFS_REFERRAL; } -/** - * cifs_new inode - create new inode, initialize, and hash it - * @sb - pointer to superblock - * @inum - if valid pointer and serverino is enabled, replace i_ino with val - * - * Create a new inode, initialize it for CIFS and hash it. Returns the new - * inode or NULL if one couldn't be allocated. - * - * If the share isn't mounted with "serverino" or inum is a NULL pointer then - * we'll just use the inode number assigned by new_inode(). Note that this can - * mean i_ino collisions since the i_ino assigned by new_inode is not - * guaranteed to be unique. - */ -struct inode * -cifs_new_inode(struct super_block *sb, __u64 *inum) -{ - struct inode *inode; - - inode = new_inode(sb); - if (inode == NULL) - return NULL; - - /* - * BB: Is i_ino == 0 legal? Here, we assume that it is. If it isn't we - * stop passing inum as ptr. Are there sanity checks we can use to - * ensure that the server is really filling in that field? Also, - * if serverino is disabled, perhaps we should be using iunique()? 
- */ - if (inum && (CIFS_SB(sb)->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) - inode->i_ino = (unsigned long) *inum; - - /* - * must set this here instead of cifs_alloc_inode since VFS will - * clobber i_flags - */ - if (sb->s_flags & MS_NOATIME) - inode->i_flags |= S_NOATIME | S_NOCMTIME; - - insert_inode_hash(inode); - - return inode; -} - int cifs_get_inode_info_unix(struct inode **pinode, const unsigned char *full_path, struct super_block *sb, int xid) @@ -302,9 +270,9 @@ int cifs_get_inode_info_unix(struct inode **pinode, return rc; } -static int decode_sfu_inode(struct inode *inode, __u64 size, - const unsigned char *path, - struct cifs_sb_info *cifs_sb, int xid) +static int +cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path, + struct cifs_sb_info *cifs_sb, int xid) { int rc; int oplock = 0; @@ -316,10 +284,15 @@ static int decode_sfu_inode(struct inode *inode, __u64 size, pbuf = buf; - if (size == 0) { - inode->i_mode |= S_IFIFO; + fattr->cf_mode &= ~S_IFMT; + + if (fattr->cf_eof == 0) { + fattr->cf_mode |= S_IFIFO; + fattr->cf_dtype = DT_FIFO; return 0; - } else if (size < 8) { + } else if (fattr->cf_eof < 8) { + fattr->cf_mode |= S_IFREG; + fattr->cf_dtype = DT_REG; return -EINVAL; /* EOPNOTSUPP? */ } @@ -331,42 +304,46 @@ static int decode_sfu_inode(struct inode *inode, __u64 size, if (rc == 0) { int buf_type = CIFS_NO_BUFFER; /* Read header */ - rc = CIFSSMBRead(xid, pTcon, - netfid, + rc = CIFSSMBRead(xid, pTcon, netfid, 24 /* length */, 0 /* offset */, &bytes_read, &pbuf, &buf_type); if ((rc == 0) && (bytes_read >= 8)) { if (memcmp("IntxBLK", pbuf, 8) == 0) { cFYI(1, ("Block device")); - inode->i_mode |= S_IFBLK; + fattr->cf_mode |= S_IFBLK; + fattr->cf_dtype = DT_BLK; if (bytes_read == 24) { /* we have enough to decode dev num */ __u64 mjr; /* major */ __u64 mnr; /* minor */ mjr = le64_to_cpu(*(__le64 *)(pbuf+8)); mnr = le64_to_cpu(*(__le64 *)(pbuf+16)); - inode->i_rdev = MKDEV(mjr, mnr); + fattr->cf_rdev = MKDEV(mjr, mnr); } } else if (memcmp("IntxCHR", pbuf, 8) == 0) { cFYI(1, ("Char device")); - inode->i_mode |= S_IFCHR; + fattr->cf_mode |= S_IFCHR; + fattr->cf_dtype = DT_CHR; if (bytes_read == 24) { /* we have enough to decode dev num */ __u64 mjr; /* major */ __u64 mnr; /* minor */ mjr = le64_to_cpu(*(__le64 *)(pbuf+8)); mnr = le64_to_cpu(*(__le64 *)(pbuf+16)); - inode->i_rdev = MKDEV(mjr, mnr); + fattr->cf_rdev = MKDEV(mjr, mnr); } } else if (memcmp("IntxLNK", pbuf, 7) == 0) { cFYI(1, ("Symlink")); - inode->i_mode |= S_IFLNK; + fattr->cf_mode |= S_IFLNK; + fattr->cf_dtype = DT_LNK; } else { - inode->i_mode |= S_IFREG; /* file? */ + fattr->cf_mode |= S_IFREG; /* file? */ + fattr->cf_dtype = DT_REG; rc = -EOPNOTSUPP; } } else { - inode->i_mode |= S_IFREG; /* then it is a file */ + fattr->cf_mode |= S_IFREG; /* then it is a file */ + fattr->cf_dtype = DT_REG; rc = -EOPNOTSUPP; /* or some unknown SFU type */ } CIFSSMBClose(xid, pTcon, netfid); @@ -376,9 +353,13 @@ static int decode_sfu_inode(struct inode *inode, __u64 size, #define SFBITS_MASK (S_ISVTX | S_ISGID | S_ISUID) /* SETFILEBITS valid bits */ -static int get_sfu_mode(struct inode *inode, - const unsigned char *path, - struct cifs_sb_info *cifs_sb, int xid) +/* + * Fetch mode bits as provided by SFU. + * + * FIXME: Doesn't this clobber the type bit we got from cifs_sfu_type ? 
+ */ +static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path, + struct cifs_sb_info *cifs_sb, int xid) { #ifdef CONFIG_CIFS_XATTR ssize_t rc; @@ -386,68 +367,80 @@ static int get_sfu_mode(struct inode *inode, __u32 mode; rc = CIFSSMBQueryEA(xid, cifs_sb->tcon, path, "SETFILEBITS", - ea_value, 4 /* size of buf */, cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + ea_value, 4 /* size of buf */, cifs_sb->local_nls, + cifs_sb->mnt_cifs_flags & + CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc < 0) return (int)rc; else if (rc > 3) { mode = le32_to_cpu(*((__le32 *)ea_value)); - inode->i_mode &= ~SFBITS_MASK; - cFYI(1, ("special bits 0%o org mode 0%o", mode, inode->i_mode)); - inode->i_mode = (mode & SFBITS_MASK) | inode->i_mode; + fattr->cf_mode &= ~SFBITS_MASK; + cFYI(1, ("special bits 0%o org mode 0%o", mode, + fattr->cf_mode)); + fattr->cf_mode = (mode & SFBITS_MASK) | fattr->cf_mode; cFYI(1, ("special mode bits 0%o", mode)); - return 0; - } else { - return 0; } + + return 0; #else return -EOPNOTSUPP; #endif } -/* - * Needed to setup inode data for the directory which is the - * junction to the new submount (ie to setup the fake directory - * which represents a DFS referral) - */ -static void fill_fake_finddata(FILE_ALL_INFO *pfnd_dat, - struct super_block *sb) +/* Fill a cifs_fattr struct with info from FILE_ALL_INFO */ +void +cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info, + struct cifs_sb_info *cifs_sb, bool adjust_tz) { - memset(pfnd_dat, 0, sizeof(FILE_ALL_INFO)); - -/* __le64 pfnd_dat->AllocationSize = cpu_to_le64(0); - __le64 pfnd_dat->EndOfFile = cpu_to_le64(0); - __u8 pfnd_dat->DeletePending = 0; - __u8 pfnd_data->Directory = 0; - __le32 pfnd_dat->EASize = 0; - __u64 pfnd_dat->IndexNumber = 0; - __u64 pfnd_dat->IndexNumber1 = 0; */ - pfnd_dat->CreationTime = - cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); - pfnd_dat->LastAccessTime = - cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); - pfnd_dat->LastWriteTime = - cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); - pfnd_dat->ChangeTime = - cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); - pfnd_dat->Attributes = cpu_to_le32(ATTR_DIRECTORY); - pfnd_dat->NumberOfLinks = cpu_to_le32(2); + memset(fattr, 0, sizeof(*fattr)); + fattr->cf_cifsattrs = le32_to_cpu(info->Attributes); + if (info->DeletePending) + fattr->cf_flags |= CIFS_FATTR_DELETE_PENDING; + + if (info->LastAccessTime) + fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime); + else + fattr->cf_atime = CURRENT_TIME; + + fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime); + fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime); + + if (adjust_tz) { + fattr->cf_ctime.tv_sec += cifs_sb->tcon->ses->server->timeAdj; + fattr->cf_mtime.tv_sec += cifs_sb->tcon->ses->server->timeAdj; + } + + fattr->cf_eof = le64_to_cpu(info->EndOfFile); + fattr->cf_bytes = le64_to_cpu(info->AllocationSize); + + if (fattr->cf_cifsattrs & ATTR_DIRECTORY) { + fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode; + fattr->cf_dtype = DT_DIR; + } else { + fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode; + fattr->cf_dtype = DT_REG; + } + + /* clear write bits if ATTR_READONLY is set */ + if (fattr->cf_cifsattrs & ATTR_READONLY) + fattr->cf_mode &= ~(S_IWUGO); + + fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks); + + fattr->cf_uid = cifs_sb->mnt_uid; + fattr->cf_gid = cifs_sb->mnt_gid; } int cifs_get_inode_info(struct inode **pinode, const unsigned char *full_path, FILE_ALL_INFO *pfindData, struct super_block *sb, int xid, const __u16 *pfid) { - int rc 
= 0; - __u32 attr; - struct cifsInodeInfo *cifsInfo; + int rc = 0, tmprc; struct cifsTconInfo *pTcon; - struct inode *inode; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); char *buf = NULL; bool adjustTZ = false; - bool is_dfs_referral = false; - umode_t default_mode; + struct cifs_fattr fattr; pTcon = cifs_sb->tcon; cFYI(1, ("Getting info on %s", full_path)); @@ -482,163 +475,82 @@ int cifs_get_inode_info(struct inode **pinode, adjustTZ = true; } } - /* dump_mem("\nQPathInfo return data",&findData, sizeof(findData)); */ - if (rc == -EREMOTE) { - is_dfs_referral = true; - fill_fake_finddata(pfindData, sb); + + if (!rc) { + cifs_all_info_to_fattr(&fattr, (FILE_ALL_INFO *) pfindData, + cifs_sb, adjustTZ); + } else if (rc == -EREMOTE) { + cifs_create_dfs_fattr(&fattr, sb); rc = 0; - } else if (rc) + } else { goto cgii_exit; + } - attr = le32_to_cpu(pfindData->Attributes); - - /* get new inode */ + /* + * If an inode wasn't passed in, then get the inode number + * + * Is an i_ino of zero legal? Can we use that to check if the server + * supports returning inode numbers? Are there other sanity checks we + * can use to ensure that the server is really filling in that field? + * + * We can not use the IndexNumber field by default from Windows or + * Samba (in ALL_INFO buf) but we can request it explicitly. The SNIA + * CIFS spec claims that this value is unique within the scope of a + * share, and the windows docs hint that it's actually unique + * per-machine. + * + * There may be higher info levels that work but are there Windows + * server or network appliances for which IndexNumber field is not + * guaranteed unique? + */ if (*pinode == NULL) { - __u64 inode_num; - __u64 *pinum = &inode_num; - - /* Is an i_ino of zero legal? Can we use that to check - if the server supports returning inode numbers? Are - there other sanity checks we can use to ensure that - the server is really filling in that field? */ - - /* We can not use the IndexNumber field by default from - Windows or Samba (in ALL_INFO buf) but we can request - it explicitly. It may not be unique presumably if - the server has multiple devices mounted under one share */ - - /* There may be higher info levels that work but are - there Windows server or network appliances for which - IndexNumber field is not guaranteed unique? */ - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { int rc1 = 0; rc1 = CIFSGetSrvInodeNumber(xid, pTcon, - full_path, pinum, + full_path, &fattr.cf_uniqueid, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc1) { - cFYI(1, ("GetSrvInodeNum rc %d", rc1)); - pinum = NULL; /* BB EOPNOSUPP disable SERVER_INUM? */ + cFYI(1, ("GetSrvInodeNum rc %d", rc1)); + fattr.cf_uniqueid = iunique(sb, ROOT_I); } } else { - pinum = NULL; + fattr.cf_uniqueid = iunique(sb, ROOT_I); } - - *pinode = cifs_new_inode(sb, pinum); - if (*pinode == NULL) { - rc = -ENOMEM; - goto cgii_exit; - } - } - inode = *pinode; - cifsInfo = CIFS_I(inode); - cifsInfo->cifsAttrs = attr; - cifsInfo->delete_pending = pfindData->DeletePending ? true : false; - cFYI(1, ("Old time %ld", cifsInfo->time)); - cifsInfo->time = jiffies; - cFYI(1, ("New time %ld", cifsInfo->time)); - - /* blksize needs to be multiple of two. 
So safer to default to - blksize and blkbits set in superblock so 2**blkbits and blksize - will match rather than setting to: - (pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/ - - /* Linux can not store file creation time so ignore it */ - if (pfindData->LastAccessTime) - inode->i_atime = cifs_NTtimeToUnix(pfindData->LastAccessTime); - else /* do not need to use current_fs_time - time not stored */ - inode->i_atime = CURRENT_TIME; - inode->i_mtime = cifs_NTtimeToUnix(pfindData->LastWriteTime); - inode->i_ctime = cifs_NTtimeToUnix(pfindData->ChangeTime); - cFYI(DBG2, ("Attributes came in as 0x%x", attr)); - if (adjustTZ && (pTcon->ses) && (pTcon->ses->server)) { - inode->i_ctime.tv_sec += pTcon->ses->server->timeAdj; - inode->i_mtime.tv_sec += pTcon->ses->server->timeAdj; - } - - /* get default inode mode */ - if (attr & ATTR_DIRECTORY) - default_mode = cifs_sb->mnt_dir_mode; - else - default_mode = cifs_sb->mnt_file_mode; - - /* set permission bits */ - if (atomic_read(&cifsInfo->inUse) == 0 || - (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0) - inode->i_mode = default_mode; - else { - /* just reenable write bits if !ATTR_READONLY */ - if ((inode->i_mode & S_IWUGO) == 0 && - (attr & ATTR_READONLY) == 0) - inode->i_mode |= (S_IWUGO & default_mode); - - inode->i_mode &= ~S_IFMT; - } - /* clear write bits if ATTR_READONLY is set */ - if (attr & ATTR_READONLY) - inode->i_mode &= ~S_IWUGO; - - /* set inode type */ - if ((attr & ATTR_SYSTEM) && - (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) { - /* no need to fix endianness on 0 */ - if (pfindData->EndOfFile == 0) - inode->i_mode |= S_IFIFO; - else if (decode_sfu_inode(inode, - le64_to_cpu(pfindData->EndOfFile), - full_path, cifs_sb, xid)) - cFYI(1, ("unknown SFU file type\n")); } else { - if (attr & ATTR_DIRECTORY) - inode->i_mode |= S_IFDIR; - else - inode->i_mode |= S_IFREG; + fattr.cf_uniqueid = CIFS_I(*pinode)->uniqueid; } - cifsInfo->server_eof = le64_to_cpu(pfindData->EndOfFile); - spin_lock(&inode->i_lock); - if (is_size_safe_to_change(cifsInfo, cifsInfo->server_eof)) { - /* can not safely shrink the file size here if the - client is writing to it due to potential races */ - i_size_write(inode, cifsInfo->server_eof); - - /* 512 bytes (2**9) is the fake blocksize that must be - used for this calculation */ - inode->i_blocks = (512 - 1 + le64_to_cpu( - pfindData->AllocationSize)) >> 9; + /* query for SFU type info if supported and needed */ + if (fattr.cf_cifsattrs & ATTR_SYSTEM && + cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { + tmprc = cifs_sfu_type(&fattr, full_path, cifs_sb, xid); + if (tmprc) + cFYI(1, ("cifs_sfu_type failed: %d", tmprc)); } - spin_unlock(&inode->i_lock); - inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks); - - /* BB fill in uid and gid here? with help from winbind? - or retrieve from NTFS stream extended attribute */ #ifdef CONFIG_CIFS_EXPERIMENTAL /* fill in 0777 bits from ACL */ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { cFYI(1, ("Getting mode bits from ACL")); - acl_to_uid_mode(cifs_sb, inode, full_path, pfid); + cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path, pfid); } #endif - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { - /* fill in remaining high mode bits e.g. 
SUID, VTX */ - get_sfu_mode(inode, full_path, cifs_sb, xid); - } else if (atomic_read(&cifsInfo->inUse) == 0) { - inode->i_uid = cifs_sb->mnt_uid; - inode->i_gid = cifs_sb->mnt_gid; - /* set so we do not keep refreshing these fields with - bad data after user has changed them in memory */ - atomic_set(&cifsInfo->inUse, 1); - } - - cifs_set_ops(inode, is_dfs_referral); - + /* fill in remaining high mode bits e.g. SUID, VTX */ + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) + cifs_sfu_mode(&fattr, full_path, cifs_sb, xid); + if (!*pinode) { + *pinode = cifs_iget(sb, &fattr); + if (!*pinode) + rc = -ENOMEM; + } else { + cifs_fattr_to_inode(*pinode, &fattr); + } cgii_exit: kfree(buf); @@ -753,21 +665,14 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino) return ERR_PTR(-ENOMEM); xid = GetXid(); - if (cifs_sb->tcon->unix_ext) { + if (cifs_sb->tcon->unix_ext) rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid); - if (!inode) - return ERR_PTR(-ENOMEM); - } else { - inode = iget_locked(sb, ino); - if (!inode) - return ERR_PTR(-ENOMEM); - if (!(inode->i_state & I_NEW)) - return inode; - - rc = cifs_get_inode_info(&inode, full_path, NULL, inode->i_sb, + else + rc = cifs_get_inode_info(&inode, full_path, NULL, sb, xid, NULL); - unlock_new_inode(inode); - } + + if (!inode) + return ERR_PTR(-ENOMEM); if (rc && cifs_sb->tcon->ipc) { cFYI(1, ("ipc connection - fake read inode")); diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 231aa6953f8..f823a4a208a 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -112,239 +112,74 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name, return dentry; } -/* Returns 1 if new inode created, 2 if both dentry and inode were */ -/* Might check in the future if inode number changed so we can rehash inode */ -static int -construct_dentry(struct qstr *qstring, struct file *file, - struct inode **ptmp_inode, struct dentry **pnew_dentry, - __u64 *inum) +static void +cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb) { - struct dentry *tmp_dentry = NULL; - struct super_block *sb = file->f_path.dentry->d_sb; - int rc = 0; + fattr->cf_uid = cifs_sb->mnt_uid; + fattr->cf_gid = cifs_sb->mnt_gid; - cFYI(1, ("For %s", qstring->name)); - - tmp_dentry = d_lookup(file->f_path.dentry, qstring); - if (tmp_dentry) { - /* BB: overwrite old name? i.e. tmp_dentry->d_name and - * tmp_dentry->d_name.len?? 
- */ - cFYI(0, ("existing dentry with inode 0x%p", - tmp_dentry->d_inode)); - *ptmp_inode = tmp_dentry->d_inode; - if (*ptmp_inode == NULL) { - *ptmp_inode = cifs_new_inode(sb, inum); - if (*ptmp_inode == NULL) - return rc; - rc = 1; - } + if (fattr->cf_cifsattrs & ATTR_DIRECTORY) { + fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode; + fattr->cf_dtype = DT_DIR; } else { - tmp_dentry = d_alloc(file->f_path.dentry, qstring); - if (tmp_dentry == NULL) { - cERROR(1, ("Failed allocating dentry")); - *ptmp_inode = NULL; - return rc; - } - - if (CIFS_SB(sb)->tcon->nocase) - tmp_dentry->d_op = &cifs_ci_dentry_ops; - else - tmp_dentry->d_op = &cifs_dentry_ops; - - *ptmp_inode = cifs_new_inode(sb, inum); - if (*ptmp_inode == NULL) - return rc; - rc = 2; - } - - tmp_dentry->d_time = jiffies; - *pnew_dentry = tmp_dentry; - return rc; -} - -static void fill_in_inode(struct inode *tmp_inode, int new_buf_type, - char *buf, unsigned int *pobject_type, int isNewInode) -{ - loff_t local_size; - struct timespec local_mtime; - - struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode); - struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb); - __u32 attr; - __u64 allocation_size; - __u64 end_of_file; - umode_t default_mode; - - /* save mtime and size */ - local_mtime = tmp_inode->i_mtime; - local_size = tmp_inode->i_size; - - if (new_buf_type) { - FILE_DIRECTORY_INFO *pfindData = (FILE_DIRECTORY_INFO *)buf; - - attr = le32_to_cpu(pfindData->ExtFileAttributes); - allocation_size = le64_to_cpu(pfindData->AllocationSize); - end_of_file = le64_to_cpu(pfindData->EndOfFile); - tmp_inode->i_atime = - cifs_NTtimeToUnix(pfindData->LastAccessTime); - tmp_inode->i_mtime = - cifs_NTtimeToUnix(pfindData->LastWriteTime); - tmp_inode->i_ctime = - cifs_NTtimeToUnix(pfindData->ChangeTime); - } else { /* legacy, OS2 and DOS style */ - int offset = cifs_sb->tcon->ses->server->timeAdj; - FIND_FILE_STANDARD_INFO *pfindData = - (FIND_FILE_STANDARD_INFO *)buf; - - tmp_inode->i_mtime = cnvrtDosUnixTm(pfindData->LastWriteDate, - pfindData->LastWriteTime, - offset); - tmp_inode->i_atime = cnvrtDosUnixTm(pfindData->LastAccessDate, - pfindData->LastAccessTime, - offset); - tmp_inode->i_ctime = cnvrtDosUnixTm(pfindData->LastWriteDate, - pfindData->LastWriteTime, - offset); - attr = le16_to_cpu(pfindData->Attributes); - allocation_size = le32_to_cpu(pfindData->AllocationSize); - end_of_file = le32_to_cpu(pfindData->DataSize); - } - - /* Linux can not store file creation time unfortunately so ignore it */ - - cifsInfo->cifsAttrs = attr; -#ifdef CONFIG_CIFS_EXPERIMENTAL - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { - /* get more accurate mode via ACL - so force inode refresh */ - cifsInfo->time = 0; - } else -#endif /* CONFIG_CIFS_EXPERIMENTAL */ - cifsInfo->time = jiffies; - - /* treat dos attribute of read-only as read-only mode bit e.g. 555? */ - /* 2767 perms - indicate mandatory locking */ - /* BB fill in uid and gid here? with help from winbind? 
- or retrieve from NTFS stream extended attribute */ - if (atomic_read(&cifsInfo->inUse) == 0) { - tmp_inode->i_uid = cifs_sb->mnt_uid; - tmp_inode->i_gid = cifs_sb->mnt_gid; + fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode; + fattr->cf_dtype = DT_REG; } - if (attr & ATTR_DIRECTORY) - default_mode = cifs_sb->mnt_dir_mode; - else - default_mode = cifs_sb->mnt_file_mode; - - /* set initial permissions */ - if ((atomic_read(&cifsInfo->inUse) == 0) || - (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0) - tmp_inode->i_mode = default_mode; - else { - /* just reenable write bits if !ATTR_READONLY */ - if ((tmp_inode->i_mode & S_IWUGO) == 0 && - (attr & ATTR_READONLY) == 0) - tmp_inode->i_mode |= (S_IWUGO & default_mode); - - tmp_inode->i_mode &= ~S_IFMT; - } - - /* clear write bits if ATTR_READONLY is set */ - if (attr & ATTR_READONLY) - tmp_inode->i_mode &= ~S_IWUGO; + if (fattr->cf_cifsattrs & ATTR_READONLY) + fattr->cf_mode &= ~S_IWUGO; - /* set inode type */ - if ((attr & ATTR_SYSTEM) && - (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) { - if (end_of_file == 0) { - tmp_inode->i_mode |= S_IFIFO; - *pobject_type = DT_FIFO; + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL && + fattr->cf_cifsattrs & ATTR_SYSTEM) { + if (fattr->cf_eof == 0) { + fattr->cf_mode &= ~S_IFMT; + fattr->cf_mode |= S_IFIFO; + fattr->cf_dtype = DT_FIFO; } else { /* - * trying to get the type can be slow, so just call - * this a regular file for now, and mark for reval + * trying to get the type and mode via SFU can be slow, + * so just call those regular files for now, and mark + * for reval */ - tmp_inode->i_mode |= S_IFREG; - *pobject_type = DT_REG; - cifsInfo->time = 0; - } - } else { - if (attr & ATTR_DIRECTORY) { - tmp_inode->i_mode |= S_IFDIR; - *pobject_type = DT_DIR; - } else { - tmp_inode->i_mode |= S_IFREG; - *pobject_type = DT_REG; + fattr->cf_flags |= CIFS_FATTR_NEED_REVAL; } } +} - /* can not fill in nlink here as in qpathinfo version and Unx search */ - if (atomic_read(&cifsInfo->inUse) == 0) - atomic_set(&cifsInfo->inUse, 1); +void +cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info, + struct cifs_sb_info *cifs_sb) +{ + memset(fattr, 0, sizeof(*fattr)); + fattr->cf_cifsattrs = le32_to_cpu(info->ExtFileAttributes); + fattr->cf_eof = le64_to_cpu(info->EndOfFile); + fattr->cf_bytes = le64_to_cpu(info->AllocationSize); + fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime); + fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime); + fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime); + + cifs_fill_common_info(fattr, cifs_sb); +} - cifsInfo->server_eof = end_of_file; - spin_lock(&tmp_inode->i_lock); - if (is_size_safe_to_change(cifsInfo, end_of_file)) { - /* can not safely change the file size here if the - client is writing to it due to potential races */ - i_size_write(tmp_inode, end_of_file); +void +cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info, + struct cifs_sb_info *cifs_sb) +{ + int offset = cifs_sb->tcon->ses->server->timeAdj; - /* 512 bytes (2**9) is the fake blocksize that must be used */ - /* for this calculation, even though the reported blocksize is larger */ - tmp_inode->i_blocks = (512 - 1 + allocation_size) >> 9; - } - spin_unlock(&tmp_inode->i_lock); - - if (allocation_size < end_of_file) - cFYI(1, ("May be sparse file, allocation less than file size")); - cFYI(1, ("File Size %ld and blocks %llu", - (unsigned long)tmp_inode->i_size, - (unsigned long long)tmp_inode->i_blocks)); - if (S_ISREG(tmp_inode->i_mode)) { - 
cFYI(1, ("File inode")); - tmp_inode->i_op = &cifs_file_inode_ops; - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) - tmp_inode->i_fop = &cifs_file_direct_nobrl_ops; - else - tmp_inode->i_fop = &cifs_file_direct_ops; - } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) - tmp_inode->i_fop = &cifs_file_nobrl_ops; - else - tmp_inode->i_fop = &cifs_file_ops; - - if ((cifs_sb->tcon) && (cifs_sb->tcon->ses) && - (cifs_sb->tcon->ses->server->maxBuf < - PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)) - tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf; - else - tmp_inode->i_data.a_ops = &cifs_addr_ops; - - if (isNewInode) - return; /* No sense invalidating pages for new inode - since have not started caching readahead file - data yet */ - - if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) && - (local_size == tmp_inode->i_size)) { - cFYI(1, ("inode exists but unchanged")); - } else { - /* file may have changed on server */ - cFYI(1, ("invalidate inode, readdir detected change")); - invalidate_remote_inode(tmp_inode); - } - } else if (S_ISDIR(tmp_inode->i_mode)) { - cFYI(1, ("Directory inode")); - tmp_inode->i_op = &cifs_dir_inode_ops; - tmp_inode->i_fop = &cifs_dir_ops; - } else if (S_ISLNK(tmp_inode->i_mode)) { - cFYI(1, ("Symbolic Link inode")); - tmp_inode->i_op = &cifs_symlink_inode_ops; - } else { - cFYI(1, ("Init special inode")); - init_special_inode(tmp_inode, tmp_inode->i_mode, - tmp_inode->i_rdev); - } + memset(fattr, 0, sizeof(*fattr)); + fattr->cf_atime = cnvrtDosUnixTm(info->LastAccessDate, + info->LastAccessTime, offset); + fattr->cf_ctime = cnvrtDosUnixTm(info->LastWriteDate, + info->LastWriteTime, offset); + fattr->cf_mtime = cnvrtDosUnixTm(info->LastWriteDate, + info->LastWriteTime, offset); + + fattr->cf_cifsattrs = le16_to_cpu(info->Attributes); + fattr->cf_bytes = le32_to_cpu(info->AllocationSize); + fattr->cf_eof = le32_to_cpu(info->DataSize); + + cifs_fill_common_info(fattr, cifs_sb); } /* BB eventually need to add the following helper function to @@ -846,11 +681,10 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir, int rc = 0; struct qstr qstring; struct cifsFileInfo *pCifsF; - unsigned int obj_type; - __u64 inum; + u64 inum; ino_t ino; + struct super_block *sb; struct cifs_sb_info *cifs_sb; - struct inode *tmp_inode; struct dentry *tmp_dentry; struct cifs_fattr fattr; @@ -870,71 +704,53 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir, if (rc != 0) return 0; - cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); + sb = file->f_path.dentry->d_sb; + cifs_sb = CIFS_SB(sb); qstring.name = scratch_buf; rc = cifs_get_name_from_search_buf(&qstring, pfindEntry, pCifsF->srch_inf.info_level, pCifsF->srch_inf.unicode, cifs_sb, - max_len, - &inum /* returned */); + max_len, &inum /* returned */); if (rc) return rc; - /* only these two infolevels return valid inode numbers */ - if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX) { + if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX) cifs_unix_basic_to_fattr(&fattr, &((FILE_UNIX_INFO *) pfindEntry)->basic, cifs_sb); - tmp_dentry = cifs_readdir_lookup(file->f_dentry, &qstring, - &fattr); - obj_type = fattr.cf_dtype; - ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid); - } else { - if (pCifsF->srch_inf.info_level == - SMB_FIND_FILE_ID_FULL_DIR_INFO) - rc = construct_dentry(&qstring, file, &tmp_inode, - &tmp_dentry, &inum); - else - rc = construct_dentry(&qstring, file, &tmp_inode, - &tmp_dentry, NULL); - - if ((tmp_inode 
== NULL) || (tmp_dentry == NULL)) { - rc = -ENOMEM; - goto out; - } - - /* we pass in rc below, indicating whether it is a new inode, - * so we can figure out whether to invalidate the inode cached - * data if the file has changed - */ - if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD) - fill_in_inode(tmp_inode, 0, pfindEntry, &obj_type, rc); - else - fill_in_inode(tmp_inode, 1, pfindEntry, &obj_type, rc); + else if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD) + cifs_std_info_to_fattr(&fattr, (FIND_FILE_STANDARD_INFO *) + pfindEntry, cifs_sb); + else + cifs_dir_info_to_fattr(&fattr, (FILE_DIRECTORY_INFO *) + pfindEntry, cifs_sb); - /* new inode - needs to be tied to dentry */ - if (rc) { - d_instantiate(tmp_dentry, tmp_inode); - if (rc == 2) - d_rehash(tmp_dentry); - } + /* FIXME: make _to_fattr functions fill this out */ + if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_ID_FULL_DIR_INFO) + fattr.cf_uniqueid = inum; + else + fattr.cf_uniqueid = iunique(sb, ROOT_I); - ino = cifs_uniqueid_to_ino_t(tmp_inode->i_ino); - } + ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid); + tmp_dentry = cifs_readdir_lookup(file->f_dentry, &qstring, &fattr); rc = filldir(direntry, qstring.name, qstring.len, file->f_pos, - ino, obj_type); + ino, fattr.cf_dtype); + + /* + * we can not return filldir errors to the caller since they are + * "normal" when the stat blocksize is too small - we return remapped + * error instead + * + * FIXME: This looks bogus. filldir returns -EOVERFLOW in the above + * case already. Why should we be clobbering other errors from it? + */ if (rc) { cFYI(1, ("filldir rc = %d", rc)); - /* we can not return filldir errors to the caller - since they are "normal" when the stat blocksize - is too small - we return remapped error instead */ rc = -EOVERFLOW; } - -out: dput(tmp_dentry); return rc; } -- cgit v1.2.3 From aeaaf253c4dee7ff9af2f3f0595f3bb66964e944 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 9 Jul 2009 01:46:39 -0400 Subject: cifs: remove cifsInodeInfo->inUse counter cifs: remove cifsInodeInfo->inUse counter It was purported to be a refcounter of some sort, but was never used that way. It never served any purpose that wasn't served equally well by the I_NEW flag. Signed-off-by: Jeff Layton Reviewed-by: Christoph Hellwig Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 1 - fs/cifs/cifsglob.h | 1 - 2 files changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 9f669f982c4..44f30504b82 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -308,7 +308,6 @@ cifs_alloc_inode(struct super_block *sb) if (!cifs_inode) return NULL; cifs_inode->cifsAttrs = 0x20; /* default */ - atomic_set(&cifs_inode->inUse, 0); cifs_inode->time = 0; cifs_inode->write_behind_rc = 0; /* Until the file is open and we have gotten oplock diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 8bcf5a4bcde..63f6cdfa563 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -364,7 +364,6 @@ struct cifsInodeInfo { struct list_head openFileList; int write_behind_rc; __u32 cifsAttrs; /* e.g. 
DOS archive bit, sparse, compressed, system */ - atomic_t inUse; /* num concurrent users (local openers cifs) of file*/ unsigned long time; /* jiffies of last update/check of inode */ bool clientCanCacheRead:1; /* read oplock */ bool clientCanCacheAll:1; /* read and writebehind oplock */ -- cgit v1.2.3 From d0c280d26de9422c9c943f8f486b9830cd9bea70 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 9 Jul 2009 01:46:44 -0400 Subject: cifs: when ATTR_READONLY is set, only clear write bits on non-directories cifs: when ATTR_READONLY is set, only clear write bits on non-directories On windows servers, ATTR_READONLY apparently either has no meaning or serves as some sort of queue to certain applications for unrelated behavior. This MS kbase article has details: http://support.microsoft.com/kb/326549/ Don't clear the write bits directory mode when ATTR_READONLY is set. Reported-by: pouchat@peewiki.net Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/inode.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index a807397f444..18afe57b246 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -419,11 +419,11 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info, } else { fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode; fattr->cf_dtype = DT_REG; - } - /* clear write bits if ATTR_READONLY is set */ - if (fattr->cf_cifsattrs & ATTR_READONLY) - fattr->cf_mode &= ~(S_IWUGO); + /* clear write bits if ATTR_READONLY is set */ + if (fattr->cf_cifsattrs & ATTR_READONLY) + fattr->cf_mode &= ~(S_IWUGO); + } fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks); -- cgit v1.2.3 From 65bc98b0059360e458aebd208587be44641227c1 Mon Sep 17 00:00:00 2001 From: Steve French Date: Fri, 10 Jul 2009 15:27:25 +0000 Subject: [CIFS] Distinguish posix opens and mkdirs from legacy mkdirs in stats Acked-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifs_debug.c | 8 +++++++- fs/cifs/cifsglob.h | 2 ++ fs/cifs/cifssmb.c | 5 ++++- 3 files changed, 13 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 7f19fefd3d4..42cec2a7c0c 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -261,6 +261,8 @@ static ssize_t cifs_stats_proc_write(struct file *file, atomic_set(&tcon->num_reads, 0); atomic_set(&tcon->num_oplock_brks, 0); atomic_set(&tcon->num_opens, 0); + atomic_set(&tcon->num_posixopens, 0); + atomic_set(&tcon->num_posixmkdirs, 0); atomic_set(&tcon->num_closes, 0); atomic_set(&tcon->num_deletes, 0); atomic_set(&tcon->num_mkdirs, 0); @@ -347,11 +349,15 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) atomic_read(&tcon->num_locks), atomic_read(&tcon->num_hardlinks), atomic_read(&tcon->num_symlinks)); - seq_printf(m, "\nOpens: %d Closes: %d" + seq_printf(m, "\nOpens: %d Closes: %d " "Deletes: %d", atomic_read(&tcon->num_opens), atomic_read(&tcon->num_closes), atomic_read(&tcon->num_deletes)); + seq_printf(m, "\nPosix Opens: %d " + "Posix Mkdirs: %d", + atomic_read(&tcon->num_posixopens), + atomic_read(&tcon->num_posixmkdirs)); seq_printf(m, "\nMkdirs: %d Rmdirs: %d", atomic_read(&tcon->num_mkdirs), atomic_read(&tcon->num_rmdirs)); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 63f6cdfa563..6084d6379c0 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -260,6 +260,8 @@ struct cifsTconInfo { atomic_t num_closes; atomic_t num_deletes; atomic_t num_mkdirs; + atomic_t num_posixopens; + atomic_t num_posixmkdirs; 
atomic_t num_rmdirs; atomic_t num_renames; atomic_t num_t2renames; diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 922f5fe2084..1866bc2927d 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1113,7 +1113,10 @@ PsxCreat: psx_create_err: cifs_buf_release(pSMB); - cifs_stats_inc(&tcon->num_mkdirs); + if (posix_flags & SMB_O_DIRECTORY) + cifs_stats_inc(&tcon->num_posixmkdirs); + else + cifs_stats_inc(&tcon->num_posixopens); if (rc == -EAGAIN) goto PsxCreat; -- cgit v1.2.3 From 8aa7e847d834ed937a9ad37a0f2ad5b8584c1ab0 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 9 Jul 2009 14:52:32 +0200 Subject: Fix congestion_wait() sync/async vs read/write confusion Commit 1faa16d22877f4839bd433547d770c676d1d964c accidentally broke the bdi congestion wait queue logic, causing us to wait on congestion for WRITE (== 1) when we really wanted BLK_RW_ASYNC (== 0) instead. Signed-off-by: Jens Axboe --- fs/fat/file.c | 2 +- fs/fuse/dev.c | 8 ++++---- fs/nfs/write.c | 8 +++++--- fs/reiserfs/journal.c | 2 +- fs/xfs/linux-2.6/kmem.c | 4 ++-- fs/xfs/linux-2.6/xfs_buf.c | 2 +- 6 files changed, 14 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/fat/file.c b/fs/fat/file.c index b28ea646ff6..f042b965c95 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -134,7 +134,7 @@ static int fat_file_release(struct inode *inode, struct file *filp) if ((filp->f_mode & FMODE_WRITE) && MSDOS_SB(inode->i_sb)->options.flush) { fat_flush_inodes(inode->i_sb, inode, NULL); - congestion_wait(WRITE, HZ/10); + congestion_wait(BLK_RW_ASYNC, HZ/10); } return 0; } diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index f58ecbc416c..6484eb75acd 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -286,8 +286,8 @@ __releases(&fc->lock) } if (fc->num_background == FUSE_CONGESTION_THRESHOLD && fc->connected && fc->bdi_initialized) { - clear_bdi_congested(&fc->bdi, READ); - clear_bdi_congested(&fc->bdi, WRITE); + clear_bdi_congested(&fc->bdi, BLK_RW_SYNC); + clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC); } fc->num_background--; fc->active_background--; @@ -414,8 +414,8 @@ static void fuse_request_send_nowait_locked(struct fuse_conn *fc, fc->blocked = 1; if (fc->num_background == FUSE_CONGESTION_THRESHOLD && fc->bdi_initialized) { - set_bdi_congested(&fc->bdi, READ); - set_bdi_congested(&fc->bdi, WRITE); + set_bdi_congested(&fc->bdi, BLK_RW_SYNC); + set_bdi_congested(&fc->bdi, BLK_RW_ASYNC); } list_add_tail(&req->list, &fc->bg_queue); flush_bg_queue(fc); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index ce728829f79..0a0a2ff767c 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -202,8 +202,10 @@ static int nfs_set_page_writeback(struct page *page) struct nfs_server *nfss = NFS_SERVER(inode); if (atomic_long_inc_return(&nfss->writeback) > - NFS_CONGESTION_ON_THRESH) - set_bdi_congested(&nfss->backing_dev_info, WRITE); + NFS_CONGESTION_ON_THRESH) { + set_bdi_congested(&nfss->backing_dev_info, + BLK_RW_ASYNC); + } } return ret; } @@ -215,7 +217,7 @@ static void nfs_end_page_writeback(struct page *page) end_page_writeback(page); if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) - clear_bdi_congested(&nfss->backing_dev_info, WRITE); + clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); } /* diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 77f5bb746bf..90622200b39 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -997,7 +997,7 @@ static int reiserfs_async_progress_wait(struct super_block *s) DEFINE_WAIT(wait); struct reiserfs_journal *j = SB_JOURNAL(s); if 
(atomic_read(&j->j_async_throttle)) - congestion_wait(WRITE, HZ / 10); + congestion_wait(BLK_RW_ASYNC, HZ / 10); return 0; } diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c index 1cd3b55ee3d..2d3f90afe5f 100644 --- a/fs/xfs/linux-2.6/kmem.c +++ b/fs/xfs/linux-2.6/kmem.c @@ -53,7 +53,7 @@ kmem_alloc(size_t size, unsigned int __nocast flags) printk(KERN_ERR "XFS: possible memory allocation " "deadlock in %s (mode:0x%x)\n", __func__, lflags); - congestion_wait(WRITE, HZ/50); + congestion_wait(BLK_RW_ASYNC, HZ/50); } while (1); } @@ -130,7 +130,7 @@ kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags) printk(KERN_ERR "XFS: possible memory allocation " "deadlock in %s (mode:0x%x)\n", __func__, lflags); - congestion_wait(WRITE, HZ/50); + congestion_wait(BLK_RW_ASYNC, HZ/50); } while (1); } diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 1418b916fc2..0c93c7ef3d1 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -412,7 +412,7 @@ _xfs_buf_lookup_pages( XFS_STATS_INC(xb_page_retries); xfsbufd_wakeup(0, gfp_mask); - congestion_wait(WRITE, HZ/50); + congestion_wait(BLK_RW_ASYNC, HZ/50); goto retry; } -- cgit v1.2.3 From ecb554a846f8e9d2a58f6d6c118168a63ac065aa Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Thu, 9 Jul 2009 14:46:53 +0200 Subject: block: fix sg SG_DXFER_TO_FROM_DEV regression I overlooked SG_DXFER_TO_FROM_DEV support when I converted sg to use the block layer mapping API (2.6.28). Douglas Gilbert explained SG_DXFER_TO_FROM_DEV: http://www.spinics.net/lists/linux-scsi/msg37135.html = The semantics of SG_DXFER_TO_FROM_DEV were: - copy user space buffer to kernel (LLD) buffer - do SCSI command which is assumed to be of the DATA_IN (data from device) variety. This would overwrite some or all of the kernel buffer - copy kernel (LLD) buffer back to the user space. The idea was to detect short reads by filling the original user space buffer with some marker bytes ("0xec" it would seem in this report). The "resid" value is a better way of detecting short reads but that was only added this century and requires co-operation from the LLD. = This patch changes the block layer mapping API to support this semantics. This simply adds another field to struct rq_map_data and enables __bio_copy_iov() to copy data from user space even with READ requests. It's better to add the flags field and kills null_mapped and the new from_user fields in struct rq_map_data but that approach makes it difficult to send this patch to stable trees because st and osst drivers use struct rq_map_data (they were converted to use the block layer in 2.6.29 and 2.6.30). Well, I should clean up the block layer mapping API. 
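To make the quoted SG_DXFER_TO_FROM_DEV semantics concrete, here is a minimal userspace sketch of the marker-byte trick (the device path and the use of INQUIRY as the DATA_IN command are illustrative assumptions, not taken from the patch):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <scsi/sg.h>

    int main(void)
    {
            unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 }; /* INQUIRY, 96 bytes */
            unsigned char buf[96];
            struct sg_io_hdr hdr;
            int fd = open("/dev/sg0", O_RDWR); /* hypothetical device */

            if (fd < 0)
                    return 1;
            memset(buf, 0xec, sizeof(buf));    /* marker bytes, as in the report */
            memset(&hdr, 0, sizeof(hdr));
            hdr.interface_id = 'S';
            hdr.cmd_len = sizeof(cdb);
            hdr.cmdp = cdb;
            hdr.dxfer_direction = SG_DXFER_TO_FROM_DEV; /* buf copied to the kernel,
                                                           then back after the command */
            hdr.dxfer_len = sizeof(buf);
            hdr.dxferp = buf;
            hdr.timeout = 5000; /* ms */
            if (ioctl(fd, SG_IO, &hdr) < 0)
                    return 1;
            /* any trailing 0xec bytes show how short the read was;
             * hdr.resid reports the same when the LLD fills it in */
            return 0;
    }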
zhou sf reported this regression and tested this patch: http://www.spinics.net/lists/linux-scsi/msg37128.html http://www.spinics.net/lists/linux-scsi/msg37168.html Reported-by: zhou sf Tested-by: zhou sf Cc: stable@kernel.org Signed-off-by: FUJITA Tomonori Signed-off-by: Jens Axboe --- fs/bio.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/bio.c b/fs/bio.c index 1486b19fc43..76738005c8e 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -705,14 +705,13 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count, } static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs, - struct sg_iovec *iov, int iov_count, int uncopy, - int do_free_page) + struct sg_iovec *iov, int iov_count, + int to_user, int from_user, int do_free_page) { int ret = 0, i; struct bio_vec *bvec; int iov_idx = 0; unsigned int iov_off = 0; - int read = bio_data_dir(bio) == READ; __bio_for_each_segment(bvec, bio, i, 0) { char *bv_addr = page_address(bvec->bv_page); @@ -727,13 +726,14 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs, iov_addr = iov[iov_idx].iov_base + iov_off; if (!ret) { - if (!read && !uncopy) - ret = copy_from_user(bv_addr, iov_addr, - bytes); - if (read && uncopy) + if (to_user) ret = copy_to_user(iov_addr, bv_addr, bytes); + if (from_user) + ret = copy_from_user(bv_addr, iov_addr, + bytes); + if (ret) ret = -EFAULT; } @@ -770,7 +770,8 @@ int bio_uncopy_user(struct bio *bio) if (!bio_flagged(bio, BIO_NULL_MAPPED)) ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, - bmd->nr_sgvecs, 1, bmd->is_our_pages); + bmd->nr_sgvecs, bio_data_dir(bio) == READ, + 0, bmd->is_our_pages); bio_free_map_data(bmd); bio_put(bio); return ret; @@ -875,8 +876,9 @@ struct bio *bio_copy_user_iov(struct request_queue *q, /* * success */ - if (!write_to_vm && (!map_data || !map_data->null_mapped)) { - ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0); + if ((!write_to_vm && (!map_data || !map_data->null_mapped)) || + (map_data && map_data->from_user)) { + ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0); if (ret) goto cleanup; } -- cgit v1.2.3 From 097041e576ee3a50d92dd643ee8ca65bf6a62e21 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Fri, 10 Jul 2009 20:06:42 -0500 Subject: fuse: Fix build error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When building v2.6.31-rc2-344-g69ca06c, the following build errors are found due to missing includes: CC [M] fs/fuse/dev.o fs/fuse/dev.c: In function ‘request_end’: fs/fuse/dev.c:289: error: ‘BLK_RW_SYNC’ undeclared (first use in this function) ...
fs/nfs/write.c: In function ‘nfs_set_page_writeback’: fs/nfs/write.c:207: error: ‘BLK_RW_ASYNC’ undeclared (first use in this function) Signed-off-by: Larry Finger@lwfinger.net> Signed-off-by: Linus Torvalds --- fs/fuse/dev.c | 1 + fs/nfs/write.c | 1 + 2 files changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 6484eb75acd..cbceacbc0bf 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -16,6 +16,7 @@ #include #include #include +#include MODULE_ALIAS_MISCDEV(FUSE_MINOR); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 0a0a2ff767c..35d81316163 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -19,6 +19,7 @@ #include #include #include +#include #include -- cgit v1.2.3 From 8711c67bee675b4f7a378c71ad5a59c981ec3df0 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Fri, 10 Jul 2009 12:34:27 +0200 Subject: isofs: fix Joliet regression commit 5404ac8e4418ab3d254950ee4f9bcafc1da20b4a ("isofs: cleanup mount option processing") missed conversion of joliet option flag resulting in non-working Joliet support. CC: walt Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Jan Kara Signed-off-by: Linus Torvalds --- fs/isofs/inode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 58a7963e168..85f96bc651c 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -142,6 +142,7 @@ static const struct dentry_operations isofs_dentry_ops[] = { struct iso9660_options{ unsigned int rock:1; + unsigned int joliet:1; unsigned int cruft:1; unsigned int hide:1; unsigned int showassoc:1; @@ -151,7 +152,6 @@ struct iso9660_options{ unsigned int gid_set:1; unsigned int utf8:1; unsigned char map; - char joliet; unsigned char check; unsigned int blocksize; mode_t fmode; @@ -632,7 +632,7 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) else if (isonum_711(vdp->type) == ISO_VD_SUPPLEMENTARY) { sec = (struct iso_supplementary_descriptor *)vdp; if (sec->escape[0] == 0x25 && sec->escape[1] == 0x2f) { - if (opt.joliet == 'y') { + if (opt.joliet) { if (sec->escape[2] == 0x40) joliet_level = 1; else if (sec->escape[2] == 0x43) -- cgit v1.2.3 From 81e4e1ba7ed4a1fdcf0e2ee944f1575010471464 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 11 Jul 2009 11:22:34 -0700 Subject: Revert "fuse: Fix build error" as unnecessary This reverts commit 097041e576ee3a50d92dd643ee8ca65bf6a62e21. Trond had a better fix, which is the parent of this one ("Fix compile error due to congestion_wait() changes") Requested-by: Trond Myklebust Acked-by: Larry Finger Signed-off-by: Linus Torvalds --- fs/fuse/dev.c | 1 - fs/nfs/write.c | 1 - 2 files changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index cbceacbc0bf..6484eb75acd 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -16,7 +16,6 @@ #include #include #include -#include MODULE_ALIAS_MISCDEV(FUSE_MINOR); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 35d81316163..0a0a2ff767c 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -19,7 +19,6 @@ #include #include #include -#include #include -- cgit v1.2.3 From 405f55712dfe464b3240d7816cc4fe4174831be2 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Sat, 11 Jul 2009 22:08:37 +0400 Subject: headers: smp_lock.h redux * Remove smp_lock.h from files which don't need it (including some headers!) 
* Add smp_lock.h to files which do need it * Make smp_lock.h include conditional in hardirq.h It's needed only for one kernel_locked() usage which is under CONFIG_PREEMPT This will make hardirq.h inclusion cheaper for every PREEMPT=n config (which includes allmodconfig/allyesconfig, BTW) Signed-off-by: Alexey Dobriyan Signed-off-by: Linus Torvalds --- fs/adfs/super.c | 1 + fs/afs/super.c | 1 + fs/autofs4/dev-ioctl.c | 1 - fs/bfs/dir.c | 1 - fs/bfs/file.c | 1 - fs/btrfs/compression.c | 1 - fs/btrfs/file.c | 1 - fs/btrfs/inode.c | 1 - fs/btrfs/ioctl.c | 1 - fs/btrfs/super.c | 1 - fs/char_dev.c | 1 - fs/compat.c | 1 - fs/compat_ioctl.c | 1 + fs/exofs/super.c | 1 + fs/ext2/ioctl.c | 1 - fs/ext4/ioctl.c | 1 - fs/fat/dir.c | 1 - fs/fat/namei_msdos.c | 1 - fs/fat/namei_vfat.c | 1 - fs/fcntl.c | 1 - fs/freevxfs/vxfs_super.c | 1 + fs/hfs/super.c | 1 + fs/hfsplus/super.c | 1 + fs/hpfs/dir.c | 1 + fs/hpfs/file.c | 1 + fs/hpfs/hpfs_fn.h | 1 - fs/hpfs/inode.c | 1 + fs/hpfs/namei.c | 1 + fs/jffs2/super.c | 1 + fs/lockd/clntproc.c | 1 + fs/lockd/svc4proc.c | 1 + fs/lockd/svcproc.c | 1 + fs/nfs/delegation.c | 1 + fs/nfs/dir.c | 1 - fs/nfs/file.c | 1 - fs/nfs/inode.c | 1 - fs/nfs/nfs4proc.c | 1 - fs/nfs/read.c | 1 - fs/nfsd/nfsctl.c | 1 - fs/nfsd/nfssvc.c | 1 - fs/nilfs2/dir.c | 1 - fs/ocfs2/ioctl.c | 1 - fs/reiserfs/xattr.c | 1 - fs/squashfs/super.c | 1 + fs/ubifs/ioctl.c | 1 - fs/xfs/linux-2.6/xfs_file.c | 1 - 46 files changed, 17 insertions(+), 29 deletions(-) (limited to 'fs') diff --git a/fs/adfs/super.c b/fs/adfs/super.c index aad92f0a104..6910a98bd73 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include "adfs.h" #include "dir_f.h" diff --git a/fs/afs/super.c b/fs/afs/super.c index ad0514d0115..e1ea1c240b6 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index f3da2eb51f5..00bf8fcb245 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c index 54bd07d44e6..1e41aadb106 100644 --- a/fs/bfs/dir.c +++ b/fs/bfs/dir.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include "bfs.h" diff --git a/fs/bfs/file.c b/fs/bfs/file.c index 6a021265f01..88b9a3ff44e 100644 --- a/fs/bfs/file.c +++ b/fs/bfs/file.c @@ -11,7 +11,6 @@ #include #include -#include #include "bfs.h" #undef DEBUG diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index de1e2fd3208..9d8ba4d54a3 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 7c3cd248d8d..4b833972273 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7ffa3d34ea1..791eab19e33 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 9f4db848db1..bd88f25889f 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 9f179d4832d..6d6d06cb6df 100644 --- 
a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/char_dev.c b/fs/char_dev.c index b7c9d5187a7..a173551e19d 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include diff --git a/fs/compat.c b/fs/compat.c index fbadb947727..94502dab972 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 626c7483b4d..f28f070a60f 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/exofs/super.c b/fs/exofs/super.c index a343b4ea62f..5ab10c3bbeb 100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c @@ -31,6 +31,7 @@ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#include #include #include #include diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c index 7cb4badef92..e7431309bdc 100644 --- a/fs/ext2/ioctl.c +++ b/fs/ext2/ioctl.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index bb415408fdb..24a6abb2aef 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/fat/dir.c b/fs/fat/dir.c index 38ff75a0fe2..530b4ca0151 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c index 82f88733b68..bbc94ae4fd7 100644 --- a/fs/fat/namei_msdos.c +++ b/fs/fat/namei_msdos.c @@ -9,7 +9,6 @@ #include #include #include -#include #include "fat.h" /* Characters that are undesirable in an MS-DOS file name */ diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index 73471b7ecc8..cb6e8355711 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include "fat.h" diff --git a/fs/fcntl.c b/fs/fcntl.c index a040b764f8e..ae413086db9 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c index cdbd1654e4c..1e8af939b3e 100644 --- a/fs/freevxfs/vxfs_super.c +++ b/fs/freevxfs/vxfs_super.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/hfs/super.c b/fs/hfs/super.c index 6f833dc8e91..f7fcbe49da7 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "hfs_fs.h" diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 9fc3af0c0da..c0759fe0855 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c index 6916c41d701..8865c94f55f 100644 --- a/fs/hpfs/dir.c +++ b/fs/hpfs/dir.c @@ -6,6 +6,7 @@ * directory VFS functions */ +#include #include "hpfs_fn.h" static int hpfs_dir_release(struct inode *inode, struct file *filp) diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index 64ab5225920..3efabff0036 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -6,6 +6,7 @@ * file VFS functions */ +#include #include "hpfs_fn.h" #define BLOCKS(size) (((size) + 511) >> 9) diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index 
c2ea31bae31..701ca54c086 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -13,7 +13,6 @@ #include #include #include -#include #include "hpfs.h" diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c index 39a1bfbea31..fe703ae46bc 100644 --- a/fs/hpfs/inode.c +++ b/fs/hpfs/inode.c @@ -6,6 +6,7 @@ * inode VFS functions */ +#include #include "hpfs_fn.h" void hpfs_init_inode(struct inode *i) diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c index b649232dde9..82b9c4ba9ed 100644 --- a/fs/hpfs/namei.c +++ b/fs/hpfs/namei.c @@ -6,6 +6,7 @@ * adding & removing files & directories */ #include +#include #include "hpfs_fn.h" static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index 07a22caf268..0035c021395 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index f2fdcbce143..4336adba952 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c index 1725037374c..bd173a6ca3b 100644 --- a/fs/lockd/svc4proc.c +++ b/fs/lockd/svc4proc.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c index 3688e55901f..e1d28ddd216 100644 --- a/fs/lockd/svcproc.c +++ b/fs/lockd/svcproc.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index af05b918cb5..6dd48a4405b 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 89f98e9a024..38d42c29fb9 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 0055b813ec2..05062329b67 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 64f87194d39..bd7938eda6a 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 92ce4351781..ff0c080db59 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -45,7 +45,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 96c4ebfa46f..73ea5e8d66c 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -18,7 +18,6 @@ #include #include #include -#include #include diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 1250fb978ac..6d0847562d8 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index d4c9884cd54..492c79b7800 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index 54100acc110..1a4fa04cf07 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -43,7 +43,6 @@ */ #include -#include #include "nilfs.h" #include "page.h" diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 9fcd36dcc9a..467b413bec2 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c @@ 
-7,7 +7,6 @@ #include #include -#include #define MLOG_MASK_PREFIX ML_INODE #include diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index f3d47d85684..6925b835a43 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -46,7 +46,6 @@ #include #include #include -#include #include #include diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 3b52770f46f..cb5fc57e370 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c index 6db7a6be6c9..8aacd64957a 100644 --- a/fs/ubifs/ioctl.c +++ b/fs/ubifs/ioctl.c @@ -25,7 +25,6 @@ /* This file implements EXT2-compatible extended attribute ioctl() calls */ #include -#include #include #include "ubifs.h" diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index f4e25544157..0542fd50764 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -41,7 +41,6 @@ #include "xfs_ioctl.h" #include -#include static struct vm_operations_struct xfs_file_vm_ops; -- cgit v1.2.3 From dd0d9a46f573b086a67522f819566427dba9c4c7 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 9 Jul 2009 10:44:30 +0100 Subject: AFS: Fix compilation warning Fix the following warning: fs/afs/dir.c: In function 'afs_d_revalidate': fs/afs/dir.c:567: warning: 'fid.vnode' may be used uninitialized in this function fs/afs/dir.c:567: warning: 'fid.unique' may be used uninitialized in this function by marking the 'fid' variable as an uninitialized_var. The problem is that gcc doesn't always manage to work out that fid is always set on the path through the function that uses it. Cc: linux-afs@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Artem Bityutskiy Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- fs/afs/dir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 9bd757774c9..88067f36e5e 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -564,7 +564,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd) { struct afs_vnode *vnode, *dir; - struct afs_fid fid; + struct afs_fid uninitialized_var(fid); struct dentry *parent; struct key *key; void *dir_version; -- cgit v1.2.3 From f8c73c790c588fd70fda1632c8927a87b3d31dcd Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 11 Jun 2009 15:14:40 +0200 Subject: partitions: fix broken uevent_suppress conversion git commit f67f129e "Driver core: implement uevent suppress in kobject" contains this chunk for fs/partitions/check.c: /* suppress uevent if the disk supresses it */ - if (!ddev->uevent_suppress) + if (!dev_get_uevent_suppress(pdev)) kobject_uevent(&pdev->kobj, KOBJ_ADD); However that should have been - if (!ddev->uevent_suppress) + if (!dev_get_uevent_suppress(ddev)) Signed-off-by: Heiko Carstens Acked-by: Ming Lei Cc: stable Signed-off-by: Greg Kroah-Hartman --- fs/partitions/check.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 1a9c7878f86..ea4e6cb29e1 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -436,7 +436,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, rcu_assign_pointer(ptbl->part[partno], p); /* suppress uevent if the disk supresses it */ - if (!dev_get_uevent_suppress(pdev)) + if (!dev_get_uevent_suppress(ddev)) 
kobject_uevent(&pdev->kobj, KOBJ_ADD); return p; -- cgit v1.2.3 From d0b6e04a4cd8360e3c9c419f7c30a3081a0c142a Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 13 Jul 2009 10:33:21 +0800 Subject: tracing/events: Move TRACE_SYSTEM outside of include guard If TRACE_INCLUDE_FILE is defined, <trace/events/TRACE_INCLUDE_FILE.h> will be included and compiled, otherwise it will be <trace/events/TRACE_SYSTEM.h>. So TRACE_SYSTEM should be defined outside of #if protection, just like TRACE_INCLUDE_FILE. Imagine this scenario: #include <trace/events/foo.h> -> TRACE_SYSTEM == foo ... #include <trace/events/bar.h> -> TRACE_SYSTEM == bar ... #define CREATE_TRACE_POINTS #include <trace/events/foo.h> -> TRACE_SYSTEM == bar !!! and then bar.h will be included and compiled. Signed-off-by: Li Zefan Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4A5A9CF1.2010007@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- fs/gfs2/trace_gfs2.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h index 98d6ef1c1dc..148d55c1417 100644 --- a/fs/gfs2/trace_gfs2.h +++ b/fs/gfs2/trace_gfs2.h @@ -1,12 +1,11 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM gfs2 + #if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_GFS2_H #include -#undef TRACE_SYSTEM -#define TRACE_SYSTEM gfs2 -#define TRACE_INCLUDE_FILE trace_gfs2 - #include #include #include @@ -403,5 +402,6 @@ TRACE_EVENT(gfs2_block_alloc, /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE trace_gfs2 #include -- cgit v1.2.3 From e6b5d30104db5f34110678ecab14988f1f1eff63 Mon Sep 17 00:00:00 2001 From: Curt Wohlgemuth Date: Mon, 13 Jul 2009 09:07:20 -0400 Subject: ext4: Fix buffer head reference leak in no-journal mode We found a problem with buffer head reference leaks when using an ext4 partition without a journal. In particular, calls to ext4_forget() would not do a brelse() on the input buffer head, which will cause pages they belong to to not be reclaimable. Further investigation showed that all places where ext4_journal_forget() and ext4_journal_revoke() are called are subject to the same problem. The patch below changes __ext4_journal_forget/__ext4_journal_revoke to do an explicit release of the buffer head when the journal handle isn't valid.
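The invariant the patch restores can be sketched as follows (a hedged illustration, not the actual diff; the helper name is made up): the callee must consume the caller's buffer_head reference on every path, journaled or not:

    /* sketch only -- see the real fix in the diff below */
    static int sketch_forget(handle_t *handle, struct buffer_head *bh)
    {
            if (ext4_handle_valid(handle))
                    return ext4_journal_forget(handle, bh); /* this path already
                                                               releases the bh */
            brelse(bh); /* no journal: drop the reference here, otherwise the
                         * page holding bh can never be reclaimed */
            return 0;
    }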
Signed-off-by: Curt Wohlgemuth Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4_jbd2.c | 4 ++++ fs/ext4/ext4_jbd2.h | 2 ++ fs/ext4/inode.c | 6 ++---- 3 files changed, 8 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index ad13a84644e..eb27fd0f2ee 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c @@ -43,6 +43,8 @@ int __ext4_journal_forget(const char *where, handle_t *handle, ext4_journal_abort_handle(where, __func__, bh, handle, err); } + else + brelse(bh); return err; } @@ -57,6 +59,8 @@ int __ext4_journal_revoke(const char *where, handle_t *handle, ext4_journal_abort_handle(where, __func__, bh, handle, err); } + else + brelse(bh); return err; } diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index d574a85aca5..139fb8cb87e 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h @@ -131,9 +131,11 @@ int __ext4_journal_get_undo_access(const char *where, handle_t *handle, int __ext4_journal_get_write_access(const char *where, handle_t *handle, struct buffer_head *bh); +/* When called with an invalid handle, this will still do a put on the BH */ int __ext4_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh); +/* When called with an invalid handle, this will still do a put on the BH */ int __ext4_journal_revoke(const char *where, handle_t *handle, ext4_fsblk_t blocknr, struct buffer_head *bh); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index c98e3afea30..f9c642b22ef 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -78,16 +78,14 @@ static int ext4_inode_is_fast_symlink(struct inode *inode) * but there may still be a record of it in the journal, and that record * still needs to be revoked. * - * If the handle isn't valid we're not journaling so there's nothing to do. + * If the handle isn't valid we're not journaling, but we still need to + * call into ext4_journal_revoke() to put the buffer head. */ int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t blocknr) { int err; - if (!ext4_handle_valid(handle)) - return 0; - might_sleep(); BUFFER_TRACE(bh, "enter"); -- cgit v1.2.3 From ac046f1d6121ccdda6db66bd88acd52418f489b2 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Mon, 13 Jul 2009 09:30:17 -0400 Subject: ext4: fix null handler of ioctls in no journal mode The EXT4_IOC_GROUP_ADD and EXT4_IOC_GROUP_EXTEND ioctls should not flush the journal in no_journal mode. 
Otherwise, running resize2fs on a mounted no_journal partition triggers the following error messages: BUG: unable to handle kernel NULL pointer dereference at 00000014 IP: [] _spin_lock+0x8/0x19 *pde = 00000000 Oops: 0002 [#1] SMP Signed-off-by: Peng Tao Signed-off-by: "Theodore Ts'o" --- fs/ext4/ioctl.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index bb415408fdb..01f149aea84 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -192,7 +192,7 @@ setversion_out: case EXT4_IOC_GROUP_EXTEND: { ext4_fsblk_t n_blocks_count; struct super_block *sb = inode->i_sb; - int err, err2; + int err, err2=0; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; @@ -205,9 +205,11 @@ setversion_out: return err; err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count); - jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); - err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal); - jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); + if (EXT4_SB(sb)->s_journal) { + jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); + err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal); + jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); + } if (err == 0) err = err2; mnt_drop_write(filp->f_path.mnt); @@ -252,7 +254,7 @@ setversion_out: case EXT4_IOC_GROUP_ADD: { struct ext4_new_group_data input; struct super_block *sb = inode->i_sb; - int err, err2; + int err, err2=0; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; @@ -266,9 +268,11 @@ setversion_out: return err; err = ext4_group_add(sb, &input); - jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); - err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal); - jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); + if (EXT4_SB(sb)->s_journal) { + jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); + err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal); + jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); + } if (err == 0) err = err2; mnt_drop_write(filp->f_path.mnt); -- cgit v1.2.3 From 833576b362e15c38be3bfe43942cda693e56287c Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 13 Jul 2009 09:45:52 -0400 Subject: ext4: Fix ext4_mb_initialize_context() to initialize all fields Pavel Roskin pointed out that kmemcheck indicated that ext4_mb_store_history() was accessing uninitialized values of ac->ac_tail and ac->ac_buddy leading to garbage in the mballoc history. Fix this by initializing the entire structure to all zeros first. Also, two fields were getting doubly initialized by the caller of ext4_mb_initialize_context, so remove them for efficiency's sake. 
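The underlying pattern generalizes beyond mballoc: when a struct is initialized field by field, every field added later starts out as stack garbage. A small sketch with a hypothetical struct (names are not from the ext4 code):

    #include <string.h>

    struct alloc_ctx {
            int status;
            int groups_scanned;
            void *buddy_page; /* the kind of field that gets missed */
    };

    static void init_ctx(struct alloc_ctx *ac, int status)
    {
            memset(ac, 0, sizeof(*ac)); /* everything defaults to zero... */
            ac->status = status;        /* ...then set only the non-zero fields */
    }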
Signed-off-by: "Theodore Ts'o" --- fs/ext4/mballoc.c | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 2fcaf286f1d..cd258463e2a 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4227,14 +4227,9 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac, ext4_get_group_no_and_offset(sb, goal, &group, &block); /* set up allocation goals */ + memset(ac, 0, sizeof(struct ext4_allocation_context)); ac->ac_b_ex.fe_logical = ar->logical; - ac->ac_b_ex.fe_group = 0; - ac->ac_b_ex.fe_start = 0; - ac->ac_b_ex.fe_len = 0; ac->ac_status = AC_STATUS_CONTINUE; - ac->ac_groups_scanned = 0; - ac->ac_ex_scanned = 0; - ac->ac_found = 0; ac->ac_sb = sb; ac->ac_inode = ar->inode; ac->ac_o_ex.fe_logical = ar->logical; @@ -4245,15 +4240,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac, ac->ac_g_ex.fe_group = group; ac->ac_g_ex.fe_start = block; ac->ac_g_ex.fe_len = len; - ac->ac_f_ex.fe_len = 0; ac->ac_flags = ar->flags; - ac->ac_2order = 0; - ac->ac_criteria = 0; - ac->ac_pa = NULL; - ac->ac_bitmap_page = NULL; - ac->ac_buddy_page = NULL; - ac->alloc_semp = NULL; - ac->ac_lg = NULL; /* we have to define context: we'll we work with a file or * locality group. this is a policy, actually */ @@ -4521,10 +4508,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, } ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); - if (ac) { - ac->ac_sb = sb; - ac->ac_inode = ar->inode; - } else { + if (!ac) { ar->len = 0; *errp = -ENOMEM; goto out1; -- cgit v1.2.3 From 96577c43827697ca1af5982fa256a34786d0c720 Mon Sep 17 00:00:00 2001 From: dingdinghua Date: Mon, 13 Jul 2009 17:55:35 -0400 Subject: jbd2: fix race between write_metadata_buffer and get_write_access The function jbd2_journal_write_metadata_buffer() calls jbd_unlock_bh_state(bh_in) too early; this could potentially allow another thread to call get_write_access on the buffer head, modify the data, and dirty it, and allowing the wrong data to be written into the journal. Fortunately, if we lose this race, the only time this will actually cause filesystem corruption is if there is a system crash or other unclean shutdown of the system before the next commit can take place. 
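The ordering the fix enforces can be illustrated outside the kernel with a small pthread sketch (an analogy under stated assumptions, not the jbd2 code): publish the buffer to the shadow list before dropping the state lock, so a racing writer that then takes the lock already sees it filed:

    #include <pthread.h>
    #include <string.h>

    struct buf {
            pthread_mutex_t state_lock;
            int on_shadow_list;
            char data[512];
    };

    static void copy_and_file(struct buf *b, char *snapshot)
    {
            pthread_mutex_lock(&b->state_lock);
            memcpy(snapshot, b->data, sizeof(b->data)); /* copy-out */
            b->on_shadow_list = 1; /* file while still holding the lock;
                                    * unlocking before this point is the
                                    * window the patch closes */
            pthread_mutex_unlock(&b->state_lock);
    }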
Signed-off-by: dingdinghua Signed-off-by: "Theodore Ts'o" --- fs/jbd2/journal.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 7b545c3b394..e378cb38397 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -297,6 +297,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, unsigned int new_offset; struct buffer_head *bh_in = jh2bh(jh_in); struct jbd2_buffer_trigger_type *triggers; + journal_t *journal = transaction->t_journal; /* * The buffer really shouldn't be locked: only the current committing @@ -310,6 +311,11 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in)); new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL); + /* keep subsequent assertions sane */ + new_bh->b_state = 0; + init_buffer(new_bh, NULL, NULL); + atomic_set(&new_bh->b_count, 1); + new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */ /* * If a new transaction has already done a buffer copy-out, then @@ -388,14 +394,6 @@ repeat: kunmap_atomic(mapped_data, KM_USER0); } - /* keep subsequent assertions sane */ - new_bh->b_state = 0; - init_buffer(new_bh, NULL, NULL); - atomic_set(&new_bh->b_count, 1); - jbd_unlock_bh_state(bh_in); - - new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */ - set_bh_page(new_bh, new_page, new_offset); new_jh->b_transaction = NULL; new_bh->b_size = jh2bh(jh_in)->b_size; @@ -412,7 +410,11 @@ repeat: * copying is moved to the transaction's shadow queue. */ JBUFFER_TRACE(jh_in, "file as BJ_Shadow"); - jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow); + spin_lock(&journal->j_list_lock); + __jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow); + spin_unlock(&journal->j_list_lock); + jbd_unlock_bh_state(bh_in); + JBUFFER_TRACE(new_jh, "file as BJ_IO"); jbd2_journal_file_buffer(new_jh, transaction, BJ_IO); -- cgit v1.2.3 From 4fed598a49c014cbc563179b25f2a4b8565e2a50 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 12 Jul 2009 11:13:55 +0900 Subject: fs/Kconfig: move nilfs2 out fs/Kconfig file was split into individual fs/*/Kconfig files before nilfs was merged. I've found the current config entry of nilfs is tainting the work. Sorry, I didn't notice. This fixes the violation. Signed-off-by: Ryusuke Konishi Cc: Alexey Dobriyan --- fs/Kconfig | 27 +-------------------------- fs/nilfs2/Kconfig | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 26 deletions(-) create mode 100644 fs/nilfs2/Kconfig (limited to 'fs') diff --git a/fs/Kconfig b/fs/Kconfig index a97263be6a9..0e7da7bb5d9 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -186,32 +186,7 @@ source "fs/romfs/Kconfig" source "fs/sysv/Kconfig" source "fs/ufs/Kconfig" source "fs/exofs/Kconfig" - -config NILFS2_FS - tristate "NILFS2 file system support (EXPERIMENTAL)" - depends on BLOCK && EXPERIMENTAL - select CRC32 - help - NILFS2 is a log-structured file system (LFS) supporting continuous - snapshotting. In addition to versioning capability of the entire - file system, users can even restore files mistakenly overwritten or - destroyed just a few seconds ago. Since this file system can keep - consistency like conventional LFS, it achieves quick recovery after - system crashes. - - NILFS2 creates a number of checkpoints every few seconds or per - synchronous write basis (unless there is no change). 
Users can - select significant versions among continuously created checkpoints, - and can change them into snapshots which will be preserved for long - periods until they are changed back to checkpoints. Each - snapshot is mountable as a read-only file system concurrently with - its writable mount, and this feature is convenient for online backup. - - Some features including atime, extended attributes, and POSIX ACLs, - are not supported yet. - - To compile this file system support as a module, choose M here: the - module will be called nilfs2. If unsure, say N. +source "fs/nilfs2/Kconfig" endif # MISC_FILESYSTEMS diff --git a/fs/nilfs2/Kconfig b/fs/nilfs2/Kconfig new file mode 100644 index 00000000000..72da095d400 --- /dev/null +++ b/fs/nilfs2/Kconfig @@ -0,0 +1,25 @@ +config NILFS2_FS + tristate "NILFS2 file system support (EXPERIMENTAL)" + depends on BLOCK && EXPERIMENTAL + select CRC32 + help + NILFS2 is a log-structured file system (LFS) supporting continuous + snapshotting. In addition to versioning capability of the entire + file system, users can even restore files mistakenly overwritten or + destroyed just a few seconds ago. Since this file system can keep + consistency like conventional LFS, it achieves quick recovery after + system crashes. + + NILFS2 creates a number of checkpoints every few seconds or per + synchronous write basis (unless there is no change). Users can + select significant versions among continuously created checkpoints, + and can change them into snapshots which will be preserved for long + periods until they are changed back to checkpoints. Each + snapshot is mountable as a read-only file system concurrently with + its writable mount, and this feature is convenient for online backup. + + Some features including atime, extended attributes, and POSIX ACLs, + are not supported yet. + + To compile this file system support as a module, choose M here: the + module will be called nilfs2. If unsure, say N. -- cgit v1.2.3 From a89d63a159b1ba5833be2bef00adf8ad8caac8be Mon Sep 17 00:00:00 2001 From: Casey Dahlin Date: Tue, 14 Jul 2009 12:17:51 -0500 Subject: dlm: free socket in error exit path In the tcp_connect_to_sock() error exit path, the socket allocated at the top of the function was not being freed. Signed-off-by: Casey Dahlin Signed-off-by: David Teigland --- fs/dlm/lowcomms.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index cdb580a9c7a..618a60f0388 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -902,7 +902,7 @@ static void tcp_connect_to_sock(struct connection *con) int result = -EHOSTUNREACH; struct sockaddr_storage saddr, src_addr; int addr_len; - struct socket *sock; + struct socket *sock = NULL; if (con->nodeid == 0) { log_print("attempt to connect sock 0 foiled"); @@ -962,6 +962,8 @@ out_err: if (con->sock) { sock_release(con->sock); con->sock = NULL; + } else if (sock) { + sock_release(sock); } /* * Some errors are fatal and this list might need adjusting. For other -- cgit v1.2.3 From 9c9ad6162e2aa1e528ed687ccab87fe681ebbef1 Mon Sep 17 00:00:00 2001 From: Abhishek Kulkarni Date: Tue, 14 Jul 2009 13:26:52 -0500 Subject: 9p: Fix incorrect parameters to v9fs_file_readn. Fix v9fs_vfs_readpage. The offset and size parameters to v9fs_file_readn were interchanged and hence passed incorrectly. 
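Bugs of this shape are easy to introduce because both parameters are plain integers, so the swapped call compiles without any diagnostic. A reduced sketch (hypothetical names, not the 9p API):

#include <stdint.h>
#include <stdio.h>

static int readn(char *buf, uint32_t count, uint64_t offset)
{
        (void)buf;
        printf("reading %u bytes at offset %llu\n",
               (unsigned)count, (unsigned long long)offset);
        return (int)count;
}

int main(void)
{
        char buf[4096];

        readn(buf, sizeof(buf), 0);  /* correct: count, then offset */
        readn(buf, 0, sizeof(buf));  /* swapped: compiles just as cleanly */
        return 0;
}

One common defence is to give offsets a distinct wrapper type so that a transposed call fails to compile.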
Signed-off-by: Abhishek Kulkarni Signed-off-by: Eric Van Hensbergen --- fs/9p/vfs_addr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index 6fcb1e7095c..92828281a30 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -57,7 +57,7 @@ static int v9fs_vfs_readpage(struct file *filp, struct page *page) buffer = kmap(page); offset = page_offset(page); - retval = v9fs_file_readn(filp, buffer, NULL, offset, PAGE_CACHE_SIZE); + retval = v9fs_file_readn(filp, buffer, NULL, PAGE_CACHE_SIZE, offset); if (retval < 0) goto done; -- cgit v1.2.3 From 7447a668a3860b66b3c9db86fdea91e355ba59ac Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 15 Jul 2009 20:36:08 +0200 Subject: jbd: Fail to load a journal if it is too short Due to on-disk corruption, it can happen that the journal is too short. Fail to load it in that case so that we don't oops somewhere later. Reported-by: Nageswara R Sastry Signed-off-by: Jan Kara --- fs/jbd/journal.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs') diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 737f7246a4b..94a64a199a6 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -848,6 +848,12 @@ static int journal_reset(journal_t *journal) first = be32_to_cpu(sb->s_first); last = be32_to_cpu(sb->s_maxlen); + if (first + JFS_MIN_JOURNAL_BLOCKS > last + 1) { + printk(KERN_ERR "JBD: Journal too short (blocks %lu-%lu).\n", + first, last); + journal_fail_superblock(journal); + return -EINVAL; + } journal->j_first = first; journal->j_last = last; -- cgit v1.2.3 From 9eaaa2d5759837402ec5eee13b2a97921808c3eb Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 13 Jul 2009 20:26:52 +0200 Subject: ext3: Fix truncation of symlinks after failed write The contents of long symlinks are written via standard write methods. So when the write fails, we add the inode to the orphan list. But symlinks don't have a .truncate method defined, so nobody properly removes them from the orphan list (both on disk and in memory). Fix this by calling ext3_truncate() directly instead of calling vmtruncate() (which is saner anyway since we don't need anything vmtruncate() does except calling .truncate in these paths). We also add the inode to the orphan list only if ext3_can_truncate() is true (currently, it can be false for symlinks when there are no blocks allocated) - otherwise orphan list processing will complain and ext3_truncate() will not remove the inode from the on-disk orphan list. Signed-off-by: Jan Kara --- fs/ext3/inode.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 5f51fed5c75..4d7da6f6184 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -1193,15 +1193,16 @@ write_begin_failed: * i_size_read because we hold i_mutex. * * Add inode to orphan list in case we crash before truncate - * finishes. + * finishes. Do this only if ext3_can_truncate() agrees so + * that orphan processing code is happy. */ - if (pos + len > inode->i_size) + if (pos + len > inode->i_size && ext3_can_truncate(inode)) ext3_orphan_add(handle, inode); ext3_journal_stop(handle); unlock_page(page); page_cache_release(page); if (pos + len > inode->i_size) - vmtruncate(inode, inode->i_size); + ext3_truncate(inode); } if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) goto retry; @@ -1287,7 +1288,7 @@ static int ext3_ordered_write_end(struct file *file, * There may be allocated blocks outside of i_size because * we failed to copy some data.
Prepare for truncate. */ - if (pos + len > inode->i_size) + if (pos + len > inode->i_size && ext3_can_truncate(inode)) ext3_orphan_add(handle, inode); ret2 = ext3_journal_stop(handle); if (!ret) ret = ret2; unlock_page(page); page_cache_release(page); if (pos + len > inode->i_size) - vmtruncate(inode, inode->i_size); + ext3_truncate(inode); return ret ? ret : copied; } @@ -1315,14 +1316,14 @@ static int ext3_writeback_write_end(struct file *file, * There may be allocated blocks outside of i_size because * we failed to copy some data. Prepare for truncate. */ - if (pos + len > inode->i_size) + if (pos + len > inode->i_size && ext3_can_truncate(inode)) ext3_orphan_add(handle, inode); ret = ext3_journal_stop(handle); unlock_page(page); page_cache_release(page); if (pos + len > inode->i_size) - vmtruncate(inode, inode->i_size); + ext3_truncate(inode); return ret ? ret : copied; } @@ -1358,7 +1359,7 @@ static int ext3_journalled_write_end(struct file *file, * There may be allocated blocks outside of i_size because * we failed to copy some data. Prepare for truncate. */ - if (pos + len > inode->i_size) + if (pos + len > inode->i_size && ext3_can_truncate(inode)) ext3_orphan_add(handle, inode); EXT3_I(inode)->i_state |= EXT3_STATE_JDATA; if (inode->i_size > EXT3_I(inode)->i_disksize) { @@ -1375,7 +1376,7 @@ static int ext3_journalled_write_end(struct file *file, page_cache_release(page); if (pos + len > inode->i_size) - vmtruncate(inode, inode->i_size); + ext3_truncate(inode); return ret ? ret : copied; } -- cgit v1.2.3 From 1e9fd53b783ea646de3ee09a4574afeb6778d504 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 24 Jun 2009 17:31:40 +0200 Subject: jbd: Fix a race between checkpointing code and journal_get_write_access() The following race can happen:

  CPU1                                CPU2
                                      checkpointing code checks the
                                      buffer, adds it to an array for
                                      writeback
  do_get_write_access()
  ...
  lock_buffer()
  unlock_buffer()
                                      flush_batch() submits the buffer
                                      for IO
  __jbd_journal_file_buffer()

So a buffer under writeout is returned from do_get_write_access(). Since the filesystem code relies on the fact that journaled buffers cannot be written out, it does not take the buffer lock and so it can modify the buffer while it is under writeout. That can lead to filesystem corruption if we crash at the right moment. A similar problem can happen with the journal_get_create_access() path. We fix the problem by clearing the buffer dirty bit under buffer_lock even if the buffer is on the BJ_None list. Actually, we clear the dirty bit regardless of the list the buffer is in and warn about the fact if the buffer is already journalled. Thanks for spotting the problem go to dingdinghua. Reported-by: dingdinghua Signed-off-by: Jan Kara --- fs/jbd/transaction.c | 68 +++++++++++++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 73242ba7c7b..c03ac11f74b 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -489,34 +489,15 @@ void journal_unlock_updates (journal_t *journal) wake_up(&journal->j_wait_transaction_locked); } -/* - * Report any unexpected dirty buffers which turn up. Normally those - * indicate an error, but they can occur if the user is running (say) - * tune2fs to modify the live filesystem, so we need the option of - * continuing as gracefully as possible.
# - * - * The caller should already hold the journal lock and - * j_list_lock spinlock: most callers will need those anyway - * in order to probe the buffer's journaling state safely. - */ -static void jbd_unexpected_dirty_buffer(struct journal_head *jh) +static void warn_dirty_buffer(struct buffer_head *bh) { - int jlist; - - /* If this buffer is one which might reasonably be dirty - * --- ie. data, or not part of this journal --- then - * we're OK to leave it alone, but otherwise we need to - * move the dirty bit to the journal's own internal - * JBDDirty bit. */ - jlist = jh->b_jlist; + char b[BDEVNAME_SIZE]; - if (jlist == BJ_Metadata || jlist == BJ_Reserved || - jlist == BJ_Shadow || jlist == BJ_Forget) { - struct buffer_head *bh = jh2bh(jh); - - if (test_clear_buffer_dirty(bh)) - set_buffer_jbddirty(bh); - } + printk(KERN_WARNING + "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). " + "There's a risk of filesystem corruption in case of system " + "crash.\n", + bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr); } /* @@ -583,14 +564,16 @@ repeat: if (jh->b_next_transaction) J_ASSERT_JH(jh, jh->b_next_transaction == transaction); + warn_dirty_buffer(bh); } /* * In any case we need to clean the dirty flag and we must * do it under the buffer lock to be sure we don't race * with running write-out. */ - JBUFFER_TRACE(jh, "Unexpected dirty buffer"); - jbd_unexpected_dirty_buffer(jh); + JBUFFER_TRACE(jh, "Journalling dirty buffer"); + clear_buffer_dirty(bh); + set_buffer_jbddirty(bh); } unlock_buffer(bh); @@ -826,6 +809,15 @@ int journal_get_create_access(handle_t *handle, struct buffer_head *bh) J_ASSERT_JH(jh, buffer_locked(jh2bh(jh))); if (jh->b_transaction == NULL) { + /* + * Previous journal_forget() could have left the buffer + * with jbddirty bit set because it was being committed. When + * the commit finished, we've filed the buffer for + * checkpointing and marked it dirty. Now we are reallocating + * the buffer so the transaction freeing it must have + * committed and so it's safe to clear the dirty bit. + */ + clear_buffer_dirty(jh2bh(jh)); jh->b_transaction = transaction; /* first access by this transaction */ @@ -1782,8 +1774,13 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) if (jh->b_cp_transaction) { JBUFFER_TRACE(jh, "on running+cp transaction"); + /* + * We don't want to write the buffer anymore, clear the + * bit so that we don't confuse checks in + * __journal_file_buffer + */ + clear_buffer_dirty(bh); __journal_file_buffer(jh, transaction, BJ_Forget); - clear_buffer_jbddirty(bh); may_free = 0; } else { JBUFFER_TRACE(jh, "on running transaction"); @@ -2041,12 +2038,17 @@ void __journal_file_buffer(struct journal_head *jh, if (jh->b_transaction && jh->b_jlist == jlist) return; - /* The following list of buffer states needs to be consistent - * with __jbd_unexpected_dirty_buffer()'s handling of dirty - * state. */ - if (jlist == BJ_Metadata || jlist == BJ_Reserved || jlist == BJ_Shadow || jlist == BJ_Forget) { + /* + * For metadata buffers, we track dirty bit in buffer_jbddirty + * instead of buffer_dirty. We should not see a dirty bit set + * here because we clear it in do_get_write_access but e.g. + * tune2fs can modify the sb and set the dirty bit at any time + * so we try to gracefully handle that. 
+ */ + if (buffer_dirty(bh)) + warn_dirty_buffer(bh); if (test_clear_buffer_dirty(bh) || test_clear_buffer_jbddirty(bh)) was_dirty = 1; -- cgit v1.2.3 From 43237b5490e8f2f4679decd660064ff35ce490cc Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 May 2009 18:41:58 +0200 Subject: ext3: Get rid of extenddisksize parameter of ext3_get_blocks_handle() Get rid of extenddisksize parameter of ext3_get_blocks_handle(). This seems to be a relict from some old days and setting disksize in this function does not make much sence. Currently it was set only by ext3_getblk(). Since the parameter has some effect only if create == 1, it is easy to check that the three callers which end up calling ext3_getblk() with create == 1 (ext3_append, ext3_quota_write, ext3_mkdir) do the right thing and set disksize themselves. Signed-off-by: Jan Kara --- fs/ext3/dir.c | 3 +-- fs/ext3/inode.c | 13 +++---------- 2 files changed, 4 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c index 3d724a95882..373fa90c796 100644 --- a/fs/ext3/dir.c +++ b/fs/ext3/dir.c @@ -130,8 +130,7 @@ static int ext3_readdir(struct file * filp, struct buffer_head *bh = NULL; map_bh.b_state = 0; - err = ext3_get_blocks_handle(NULL, inode, blk, 1, - &map_bh, 0, 0); + err = ext3_get_blocks_handle(NULL, inode, blk, 1, &map_bh, 0); if (err > 0) { pgoff_t index = map_bh.b_blocknr >> (PAGE_CACHE_SHIFT - inode->i_blkbits); diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 4d7da6f6184..b49908a167a 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -788,7 +788,7 @@ err_out: int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result, - int create, int extend_disksize) + int create) { int err = -EIO; int offsets[4]; @@ -911,13 +911,6 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, if (!err) err = ext3_splice_branch(handle, inode, iblock, partial, indirect_blks, count); - /* - * i_disksize growing is protected by truncate_mutex. Don't forget to - * protect it if you're about to implement concurrent - * ext3_get_block() -bzzz - */ - if (!err && extend_disksize && inode->i_size > ei->i_disksize) - ei->i_disksize = inode->i_size; mutex_unlock(&ei->truncate_mutex); if (err) goto cleanup; @@ -972,7 +965,7 @@ static int ext3_get_block(struct inode *inode, sector_t iblock, } ret = ext3_get_blocks_handle(handle, inode, iblock, - max_blocks, bh_result, create, 0); + max_blocks, bh_result, create); if (ret > 0) { bh_result->b_size = (ret << inode->i_blkbits); ret = 0; @@ -1005,7 +998,7 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode, dummy.b_blocknr = -1000; buffer_trace_init(&dummy.b_history); err = ext3_get_blocks_handle(handle, inode, block, 1, - &dummy, create, 1); + &dummy, create); /* * ext3_get_blocks_handle() returns number of blocks * mapped. 0 in case of a HOLE. -- cgit v1.2.3 From 90a98b2f3f3647fb17667768a348b2b219f2a9f7 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 20 Jul 2009 13:40:52 -0400 Subject: cifs: free nativeFileSystem field before allocating a new one ...otherwise, we'll leak this memory if we have to reconnect (e.g. after network failure). 
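The underlying rule is general: a struct field that owns heap memory and can be assigned more than once over the object's lifetime must have its old value freed before being overwritten. A user-space sketch of the pattern (standard C library calls standing in for kfree()/kstrdup(); hypothetical names):

#include <stdlib.h>
#include <string.h>

struct conn {
        char *fs_name;  /* owned; may be reassigned on every reconnect */
};

static void set_fs_name(struct conn *c, const char *name)
{
        free(c->fs_name);          /* free(NULL) is a no-op, so this is
                                    * safe on the first assignment too */
        c->fs_name = strdup(name);
}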
Signed-off-by: Jeff Layton CC: Stable Signed-off-by: Steve French --- fs/cifs/connect.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index e16d7592116..9bb5c875073 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2726,6 +2726,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, strncpy(tcon->treeName, tree, MAX_TREE_SIZE); /* mostly informational -- no need to fail on error here */ + kfree(tcon->nativeFileSystem); tcon->nativeFileSystem = cifs_strndup_from_ucs(bcc_ptr, bytes_left, is_unicode, nls_codepage); -- cgit v1.2.3 From f1015c447781729060c415f5133164c638561f25 Mon Sep 17 00:00:00 2001 From: dingdinghua Date: Wed, 15 Jul 2009 21:42:05 +0200 Subject: jbd: fix race between write_metadata_buffer and get_write_access The function journal_write_metadata_buffer() calls jbd_unlock_bh_state(bh_in) too early; this could potentially allow another thread to call get_write_access on the buffer head, modify the data, and dirty it, allowing the wrong data to be written into the journal. Fortunately, if we lose this race, the only time this will actually cause filesystem corruption is if there is a system crash or other unclean shutdown of the system before the next commit can take place. Signed-off-by: dingdinghua Acked-by: "Theodore Ts'o" Signed-off-by: Jan Kara --- fs/jbd/journal.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 94a64a199a6..f96f85092d1 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -287,6 +287,7 @@ int journal_write_metadata_buffer(transaction_t *transaction, struct page *new_page; unsigned int new_offset; struct buffer_head *bh_in = jh2bh(jh_in); + journal_t *journal = transaction->t_journal; /* * The buffer really shouldn't be locked: only the current committing @@ -300,6 +301,11 @@ int journal_write_metadata_buffer(transaction_t *transaction, J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in)); new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL); + /* keep subsequent assertions sane */ + new_bh->b_state = 0; + init_buffer(new_bh, NULL, NULL); + atomic_set(&new_bh->b_count, 1); + new_jh = journal_add_journal_head(new_bh); /* This sleeps */ /* * If a new transaction has already done a buffer copy-out, then @@ -361,14 +367,6 @@ repeat: kunmap_atomic(mapped_data, KM_USER0); } - /* keep subsequent assertions sane */ - new_bh->b_state = 0; - init_buffer(new_bh, NULL, NULL); - atomic_set(&new_bh->b_count, 1); - jbd_unlock_bh_state(bh_in); - - new_jh = journal_add_journal_head(new_bh); /* This sleeps */ - set_bh_page(new_bh, new_page, new_offset); new_jh->b_transaction = NULL; new_bh->b_size = jh2bh(jh_in)->b_size; @@ -385,7 +383,11 @@ repeat: * copying is moved to the transaction's shadow queue. */ JBUFFER_TRACE(jh_in, "file as BJ_Shadow"); - journal_file_buffer(jh_in, transaction, BJ_Shadow); + spin_lock(&journal->j_list_lock); + __journal_file_buffer(jh_in, transaction, BJ_Shadow); + spin_unlock(&journal->j_list_lock); + jbd_unlock_bh_state(bh_in); + JBUFFER_TRACE(new_jh, "file as BJ_IO"); journal_file_buffer(new_jh, transaction, BJ_IO); -- cgit v1.2.3 From 5549f7cdf84c02939fd368d0842aa2f472bb6e98 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Tue, 7 Jul 2009 10:28:23 -0400 Subject: inotify: drop user watch count when a watch is removed The inotify rewrite forgot to drop the inotify watch use count when a watch was removed.
This means that a single inotify fd can only ever register a maximum of /proc/sys/fs/max_user_watches even if some of those had been freed. Signed-off-by: Eric Paris --- fs/notify/inotify/inotify_user.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index ff27a296584..1a870f9157b 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -404,6 +404,8 @@ skip_send_ignore: /* removed from idr, drop that reference */ fsnotify_put_mark(entry); + + atomic_dec(&group->inotify_data.user->inotify_watches); } /* ding dong the mark is dead */ -- cgit v1.2.3 From 75fe2b26394c59c8e16bd7b76f4be5d048103ad1 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Tue, 7 Jul 2009 10:28:23 -0400 Subject: inotify: do not leak inode marks in inotify_add_watch inotify_add_watch had a couple of problems. The biggest was that if inotify_add_watch was called on the same inode twice (to update or change the event mask) a reference was taken on the original inode mark by fsnotify_find_mark_entry but was not being dropped at the end of the inotify_add_watch call. Thus, if inotify_rm_watch was called, the mark was removed from the inode but the refcnt wouldn't hit zero and we would leak memory. Reported-by: Catalin Marinas Signed-off-by: Eric Paris --- fs/notify/inotify/inotify_user.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 1a870f9157b..aff4214f16c 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -463,9 +463,6 @@ retry: goto out_err; spin_lock(&group->inotify_data.idr_lock); - /* if entry is added to the idr we keep the reference obtained - * through fsnotify_mark_add. remember to drop this reference - * when entry is removed from idr */ ret = idr_get_new_above(&group->inotify_data.idr, entry, ++group->inotify_data.last_wd, &ientry->wd); @@ -476,8 +473,13 @@ retry: goto out_err; } atomic_inc(&group->inotify_data.user->inotify_watches); + + /* we put the mark on the idr, take a reference */ + fsnotify_get_mark(entry); } + ret = ientry->wd; + spin_lock(&entry->lock); old_mask = entry->mask; @@ -508,7 +510,11 @@ retry: fsnotify_recalc_group_mask(group); } - return ientry->wd; + /* this either matches fsnotify_find_mark_entry, or init_mark_entry + * depending on which path we took... */ + fsnotify_put_mark(entry); + + return ret; out_err: /* see this isn't supposed to happen, just kill the watch */ -- cgit v1.2.3 From 7e790dd5fc937bc8d2400c30a05e32a9e9eef276 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Tue, 7 Jul 2009 10:28:24 -0400 Subject: inotify: fix error paths in inotify_update_watch inotify_update_watch could leave things in a horrid state on a number of error paths. We could try to remove idr entries that didn't exist, we could send an IN_IGNORED to userspace for watches that don't exist, and a bit of other stupidity. Clean these up by doing the idr addition before we put the mark on the inode since we can clean that up on error and getting off the inode's mark list is hard.
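The invariant at stake is the usual get/put balance: a lookup that returns a referenced object obligates every exit path to drop exactly one reference. A minimal sketch of that contract (hypothetical types, C11 atomics standing in for the kernel's atomic_t):

#include <stdatomic.h>
#include <stdlib.h>

struct mark {
        atomic_int refcnt;
};

static void mark_get(struct mark *m)
{
        atomic_fetch_add(&m->refcnt, 1);
}

static void mark_put(struct mark *m)
{
        if (atomic_fetch_sub(&m->refcnt, 1) == 1)
                free(m);                /* last reference dropped */
}

/* A lookup that hands back a referenced object: the caller now owes
 * exactly one mark_put(), on every path out. */
static struct mark *mark_lookup(struct mark *m)
{
        if (m)
                mark_get(m);
        return m;
}

static int update_mark(struct mark *existing)
{
        struct mark *m = mark_lookup(existing);

        if (!m)
                return -1;
        /* ... update the event mask here ... */
        mark_put(m);                    /* balances mark_lookup() */
        return 0;
}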
Signed-off-by: Eric Paris --- fs/notify/inotify/inotify_user.c | 79 +++++++++++++++++++++++++--------------- 1 file changed, 49 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index aff4214f16c..726118a5845 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -365,6 +365,17 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns return error; } +static void inotify_remove_from_idr(struct fsnotify_group *group, + struct inotify_inode_mark_entry *ientry) +{ + struct idr *idr; + + spin_lock(&group->inotify_data.idr_lock); + idr = &group->inotify_data.idr; + idr_remove(idr, ientry->wd); + spin_unlock(&group->inotify_data.idr_lock); + ientry->wd = -1; +} /* * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the * internal reference help on the mark because it is in the idr. @@ -375,7 +386,6 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, struct inotify_inode_mark_entry *ientry; struct inotify_event_private_data *event_priv; struct fsnotify_event_private_data *fsn_event_priv; - struct idr *idr; ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); @@ -397,10 +407,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, skip_send_ignore: /* remove this entry from the idr */ - spin_lock(&group->inotify_data.idr_lock); - idr = &group->inotify_data.idr; - idr_remove(idr, ientry->wd); - spin_unlock(&group->inotify_data.idr_lock); + inotify_remove_from_idr(group, ientry); /* removed from idr, drop that reference */ fsnotify_put_mark(entry); @@ -420,6 +427,7 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod { struct fsnotify_mark_entry *entry = NULL; struct inotify_inode_mark_entry *ientry; + struct inotify_inode_mark_entry *tmp_ientry; int ret = 0; int add = (arg & IN_MASK_ADD); __u32 mask; @@ -430,50 +438,60 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod if (unlikely(!mask)) return -EINVAL; - ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); - if (unlikely(!ientry)) + tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); + if (unlikely(!tmp_ientry)) return -ENOMEM; /* we set the mask at the end after attaching it */ - fsnotify_init_mark(&ientry->fsn_entry, inotify_free_mark); - ientry->wd = 0; + fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark); + tmp_ientry->wd = -1; find_entry: spin_lock(&inode->i_lock); entry = fsnotify_find_mark_entry(group, inode); spin_unlock(&inode->i_lock); if (entry) { - kmem_cache_free(inotify_inode_mark_cachep, ientry); ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); } else { - if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) { - ret = -ENOSPC; - goto out_err; - } - - ret = fsnotify_add_mark(&ientry->fsn_entry, group, inode); - if (ret == -EEXIST) - goto find_entry; - else if (ret) + ret = -ENOSPC; + if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) goto out_err; - - entry = &ientry->fsn_entry; retry: ret = -ENOMEM; if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL))) goto out_err; spin_lock(&group->inotify_data.idr_lock); - ret = idr_get_new_above(&group->inotify_data.idr, entry, - ++group->inotify_data.last_wd, - &ientry->wd); + ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry, + 
group->inotify_data.last_wd, + &tmp_ientry->wd); spin_unlock(&group->inotify_data.idr_lock); if (ret) { if (ret == -EAGAIN) goto retry; goto out_err; } + + ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode); + if (ret) { + inotify_remove_from_idr(group, tmp_ientry); + if (ret == -EEXIST) + goto find_entry; + goto out_err; + } + + /* tmp_ientry has been added to the inode, so we are all set up. + * now we just need to make sure tmp_ientry doesn't get freed and + * we need to set up entry and ientry so the generic code can + * do its thing. */ + ientry = tmp_ientry; + entry = &ientry->fsn_entry; + tmp_ientry = NULL; + atomic_inc(&group->inotify_data.user->inotify_watches); + /* update the idr hint */ + group->inotify_data.last_wd = ientry->wd; + /* we put the mark on the idr, take a reference */ fsnotify_get_mark(entry); } @@ -514,14 +532,15 @@ retry: * depending on which path we took... */ fsnotify_put_mark(entry); - return ret; - out_err: - /* see this isn't supposed to happen, just kill the watch */ - if (entry) { - fsnotify_destroy_mark_by_entry(entry); - fsnotify_put_mark(entry); + /* could be an error, could be that we found an existing mark */ + if (tmp_ientry) { + /* on the idr but didn't make it on the inode */ + if (tmp_ientry->wd != -1) + inotify_remove_from_idr(group, tmp_ientry); + kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry); } + return ret; } -- cgit v1.2.3 From 520dc2a526fd681337883b6ff1ddcf7c23b1b063 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Mon, 13 Jul 2009 15:56:54 -0400 Subject: fsnotify: use def_bool in kconfig instead of letting the user choose fsnotify doesn't give the user anything. If someone chooses inotify or dnotify it should build fsnotify, if they don't select one it shouldn't be built. This patch changes fsnotify to be a def_bool=n and makes everything else select it. Also fixes the issue people complained about on lwn where gdm hung because they didn't have inotify and they didn't get the inotify build option..... Signed-off-by: Eric Paris --- fs/notify/Kconfig | 12 +----------- fs/notify/dnotify/Kconfig | 2 +- fs/notify/inotify/Kconfig | 2 +- 3 files changed, 3 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig index 31dac7e3b0f..dffbb0911d0 100644 --- a/fs/notify/Kconfig +++ b/fs/notify/Kconfig @@ -1,15 +1,5 @@ config FSNOTIFY - bool "Filesystem notification backend" - default y - ---help--- - fsnotify is a backend for filesystem notification. fsnotify does - not provide any userspace interface but does provide the basis - needed for other notification schemes such as dnotify, inotify, - and fanotify. - - Say Y here to enable fsnotify suport. - - If unsure, say Y.
+ def_bool n source "fs/notify/dnotify/Kconfig" source "fs/notify/inotify/Kconfig" diff --git a/fs/notify/dnotify/Kconfig b/fs/notify/dnotify/Kconfig index 904ff8d5405..f9c1ca139d8 100644 --- a/fs/notify/dnotify/Kconfig +++ b/fs/notify/dnotify/Kconfig @@ -1,6 +1,6 @@ config DNOTIFY bool "Dnotify support" - depends on FSNOTIFY + select FSNOTIFY default y help Dnotify is a directory-based per-fd file change notification system diff --git a/fs/notify/inotify/Kconfig b/fs/notify/inotify/Kconfig index 5356884289a..3e56dbffe72 100644 --- a/fs/notify/inotify/Kconfig +++ b/fs/notify/inotify/Kconfig @@ -15,7 +15,7 @@ config INOTIFY config INOTIFY_USER bool "Inotify support for userspace" - depends on FSNOTIFY + select FSNOTIFY default y ---help--- Say Y here to enable inotify support for userspace, including the -- cgit v1.2.3 From 4a148ba988988b9c400ad0f2cbccc155289b954b Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Mon, 13 Jul 2009 15:56:55 -0400 Subject: inotify: check filename before dropping repeat events inotify drops events if the last event on the queue is the same as the current event. But it does two things wrong. First, it is comparing old->inode with new->inode. But after an event is put on the queue the ->inode is no longer allowed to be used. It's possible between the last event and this new event the inode could be reused and we would falsely match the inode's memory address between two differing events. The second problem is that when a file is removed fsnotify is passed the negative dentry for the removed object rather than the positive dentry from immediately before the removal. This means the (broken) inotify tail drop code was matching the NULL ->inode of differing events. The fix is to check the file name which is stored with events when doing the tail drop instead of wrongly checking the address of the stored ->inode. Reported-by: Scott James Remnant Signed-off-by: Eric Paris --- fs/notify/notification.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/notify/notification.c b/fs/notify/notification.c index 959b73e756f..69391fe8efb 100644 --- a/fs/notify/notification.c +++ b/fs/notify/notification.c @@ -136,10 +136,15 @@ static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new { if ((old->mask == new->mask) && (old->to_tell == new->to_tell) && - (old->data_type == new->data_type)) { + (old->data_type == new->data_type) && + (old->name_len == new->name_len)) { switch (old->data_type) { case (FSNOTIFY_EVENT_INODE): - if (old->inode == new->inode) + /* remember, after old was put on the wait_q we aren't + * allowed to look at the inode any more, only thing + * left to check was if the file_name is the same */ + if (old->name_len && + !strcmp(old->file_name, new->file_name)) return true; break; case (FSNOTIFY_EVENT_PATH): -- cgit v1.2.3 From c05594b62125c528d93af3a78229793aae36df7f Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Mon, 13 Jul 2009 15:56:55 -0400 Subject: fsnotify: fix inotify tail drop check with path entries fsnotify drops new events when they are the same as the tail event on the queue to be sent to userspace. The problem is that if the event comes with a path we forget to break out of the switch statement and fall into the code path which matches on events that do not have any type of file backed information (things like IN_UNMOUNT and IN_Q_OVERFLOW). The problem is that this code thinks all such events should be dropped. The fix is to add a break.
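In miniature, the bug looks like this (simplified stand-ins for the fsnotify types): without the break, a path event that fails its own comparison falls through into the no-data case and is wrongly reported as a duplicate.

enum ev_type { EV_INODE, EV_PATH, EV_NONE };

static int same_event(enum ev_type type, int fields_match)
{
        switch (type) {
        case EV_PATH:
                if (fields_match)
                        return 1;
                break;          /* without this, control falls through
                                 * into the EV_NONE case below */
        case EV_NONE:
                return 1;
        default:
                break;
        }
        return 0;
}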
Signed-off-by: Eric Paris --- fs/notify/notification.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/notify/notification.c b/fs/notify/notification.c index 69391fe8efb..2b20feaf263 100644 --- a/fs/notify/notification.c +++ b/fs/notify/notification.c @@ -151,6 +151,7 @@ static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new if ((old->path.mnt == new->path.mnt) && (old->path.dentry == new->path.dentry)) return true; + break; case (FSNOTIFY_EVENT_NONE): return true; }; -- cgit v1.2.3 From f44aebcc566d1d6275f7191867b9633dc11de2ee Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Wed, 15 Jul 2009 15:49:52 -0400 Subject: inotify: use GFP_NOFS under potential memory pressure inotify can have watches removed under filesystem reclaim. ================================= [ INFO: inconsistent lock state ] 2.6.31-rc2 #16 --------------------------------- inconsistent {IN-RECLAIM_FS-W} -> {RECLAIM_FS-ON-W} usage. khubd/217 [HC0[0]:SC0[0]:HE1:SE1] takes: (iprune_mutex){+.+.?.}, at: [] invalidate_inodes+0x20/0xe3 {IN-RECLAIM_FS-W} state was registered at: [] __lock_acquire+0x2c9/0xac4 [] lock_acquire+0x9f/0xc2 [] __mutex_lock_common+0x2d/0x323 [] mutex_lock_nested+0x2e/0x36 [] shrink_icache_memory+0x38/0x1b2 [] shrink_slab+0xe2/0x13c [] kswapd+0x3d1/0x55d [] kthread+0x66/0x6b [] kernel_thread_helper+0x7/0x10 [] 0xffffffff Two things are needed to fix this. First, we need a method to tell fsnotify_create_event() to use GFP_NOFS and second, we need to stop using one global IN_IGNORED event and allocate them one at a time. This solves current issues with multiple IN_IGNORED on a queue having tail drop problems and simplifies the allocations since we don't have to worry about two tasks operating on the IGNORED event concurrently.
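The general rule the patch applies: any allocation on a path reachable from memory reclaim must not use __GFP_FS, or the allocator can recurse back into the filesystem and deadlock. A kernel-style sketch of the choice (illustrative only, not the fsnotify code; the struct is hypothetical):

#include <linux/slab.h>

struct ignored_event {
        int wd;
};

static struct ignored_event *alloc_ignored_event(void)
{
        /* GFP_KERNEL may enter filesystem reclaim and re-take locks we
         * already hold; GFP_NOFS masks __GFP_FS and avoids that. */
        return kmalloc(sizeof(struct ignored_event), GFP_NOFS);
}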
Signed-off-by: Eric Paris --- fs/notify/fsnotify.c | 4 +++- fs/notify/inotify/inotify_user.c | 18 ++++++++++++------ fs/notify/notification.c | 9 +++++---- 3 files changed, 20 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index ec2f7bd7681..037e878e03f 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -159,7 +159,9 @@ void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, const if (!group->ops->should_send_event(group, to_tell, mask)) continue; if (!event) { - event = fsnotify_create_event(to_tell, mask, data, data_is, file_name, cookie); + event = fsnotify_create_event(to_tell, mask, data, + data_is, file_name, cookie, + GFP_KERNEL); /* shit, we OOM'd and now we can't tell, maybe * someday someone else will want to do something * here */ diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 726118a5845..f30d9bbc2e1 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -57,7 +57,6 @@ int inotify_max_user_watches __read_mostly; static struct kmem_cache *inotify_inode_mark_cachep __read_mostly; struct kmem_cache *event_priv_cachep __read_mostly; -static struct fsnotify_event *inotify_ignored_event; /* * When inotify registers a new group it increments this and uses that @@ -384,12 +383,19 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, struct fsnotify_group *group) { struct inotify_inode_mark_entry *ientry; + struct fsnotify_event *ignored_event; struct inotify_event_private_data *event_priv; struct fsnotify_event_private_data *fsn_event_priv; + ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, + FSNOTIFY_EVENT_NONE, NULL, 0, + GFP_NOFS); + if (!ignored_event) + return; + ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); - event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL); + event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS); if (unlikely(!event_priv)) goto skip_send_ignore; @@ -398,7 +404,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, fsn_event_priv->group = group; event_priv->wd = ientry->wd; - fsnotify_add_notify_event(group, inotify_ignored_event, fsn_event_priv); + fsnotify_add_notify_event(group, ignored_event, fsn_event_priv); /* did the private data get added? 
*/ if (list_empty(&fsn_event_priv->event_list)) @@ -406,6 +412,9 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry, skip_send_ignore: + /* matches the reference taken when the event was created */ + fsnotify_put_event(ignored_event); + /* remove this entry from the idr */ inotify_remove_from_idr(group, ientry); @@ -748,9 +757,6 @@ static int __init inotify_user_setup(void) inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC); event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC); - inotify_ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE, NULL, 0); - if (!inotify_ignored_event) - panic("unable to allocate the inotify ignored event\n"); inotify_max_queued_events = 16384; inotify_max_user_instances = 128; diff --git a/fs/notify/notification.c b/fs/notify/notification.c index 2b20feaf263..521368574e9 100644 --- a/fs/notify/notification.c +++ b/fs/notify/notification.c @@ -153,7 +153,7 @@ static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new return true; break; case (FSNOTIFY_EVENT_NONE): - return true; + return false; }; } return false; @@ -345,18 +345,19 @@ static void initialize_event(struct fsnotify_event *event) * @name the filename, if available */ struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data, - int data_type, const char *name, u32 cookie) + int data_type, const char *name, u32 cookie, + gfp_t gfp) { struct fsnotify_event *event; - event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL); + event = kmem_cache_alloc(fsnotify_event_cachep, gfp); if (!event) return NULL; initialize_event(event); if (name) { - event->file_name = kstrdup(name, GFP_KERNEL); + event->file_name = kstrdup(name, gfp); if (!event->file_name) { kmem_cache_free(fsnotify_event_cachep, event); return NULL; -- cgit v1.2.3 From b64aec8d1e1d8482a7b6cca60c8105c756bf1fe4 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 21 Jul 2009 16:47:46 -0400 Subject: NFSv4: Fix an Oops in nfs4_free_lock_state The oops http://www.kerneloops.org/raw.php?rawid=537858&msgid= appears to be due to the nfs4_lock_state->ls_state field being uninitialised. This happens if the call to nfs4_free_lock_state() is triggered at the end of nfs4_get_lock_state(). The fix is to move the initialisation of ls_state into the allocator. 
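The pattern of the fix, reduced to its essentials (hypothetical names, user-space allocation standing in for the kernel's): initialize every field that a teardown path may inspect inside the allocator itself, so that an object freed before it is fully linked in is still self-consistent.

#include <stdlib.h>

struct state;
struct owner;

struct lock_state {
        struct state *state;    /* previously filled in later by the caller */
        struct owner *owner;
        int count;
};

static struct lock_state *alloc_lock_state(struct state *s, struct owner *o)
{
        struct lock_state *ls = calloc(1, sizeof(*ls));

        if (!ls)
                return NULL;
        ls->state = s;          /* set here, so a free path that runs
                                 * before the object is linked in never
                                 * sees a garbage pointer */
        ls->owner = o;
        ls->count = 1;
        return ls;
}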
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4state.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index b73c5a72865..65ca8c18476 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -553,6 +553,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f INIT_LIST_HEAD(&lsp->ls_sequence.list); lsp->ls_seqid.sequence = &lsp->ls_sequence; atomic_set(&lsp->ls_count, 1); + lsp->ls_state = state; lsp->ls_owner = fl_owner; spin_lock(&clp->cl_lock); nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64); @@ -587,7 +588,6 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_ if (lsp != NULL) break; if (new != NULL) { - new->ls_state = state; list_add(&new->ls_locks, &state->lock_states); set_bit(LK_STATE_IN_USE, &state->flags); lsp = new; -- cgit v1.2.3 From fccba8045537f7e840d0e7565e1989d465e488a3 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 21 Jul 2009 16:48:07 -0400 Subject: NFSv4: Fix an NFSv4 mount regression Commit 008f55d0e019943323c20a03493a2ba5672a4cc8 (nfs41: recover lease in _nfs4_lookup_root) forces the state manager to always run on mount. This is a bug in the case of NFSv4.0, which doesn't require us to send a setclientid until we want to grab file state. In any case, this is completely the wrong place to be doing state management. Moving that code into nfs4_init_session... Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 18 +++--------------- fs/nfs/nfs4_fs.h | 6 ++++++ fs/nfs/nfs4proc.c | 24 +++++++++++++++++------- 3 files changed, 26 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index c2d061675d8..8d25ccb2d51 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1242,20 +1242,6 @@ error: return error; } -/* - * Initialize a session. - * Note: save the mount rsize and wsize for create_server negotiation. - */ -static void nfs4_init_session(struct nfs_client *clp, - unsigned int wsize, unsigned int rsize) -{ -#if defined(CONFIG_NFS_V4_1) - if (nfs4_has_session(clp)) { - clp->cl_session->fc_attrs.max_rqst_sz = wsize; - clp->cl_session->fc_attrs.max_resp_sz = rsize; - } -#endif /* CONFIG_NFS_V4_1 */ -} /* * Session has been established, and the client marked ready. 
@@ -1350,7 +1336,9 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data, BUG_ON(!server->nfs_client->rpc_ops); BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); - nfs4_init_session(server->nfs_client, server->wsize, server->rsize); + error = nfs4_init_session(server); + if (error < 0) + goto error; /* Probe the root fh to retrieve its FSID */ error = nfs4_path_walk(server, mntfh, data->nfs_server.export_path); diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 61bc3a32e1e..6ea07a3c75d 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -220,6 +220,7 @@ extern void nfs4_destroy_session(struct nfs4_session *session); extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp); extern int nfs4_proc_create_session(struct nfs_client *, int reset); extern int nfs4_proc_destroy_session(struct nfs4_session *); +extern int nfs4_init_session(struct nfs_server *server); #else /* CONFIG_NFS_v4_1 */ static inline int nfs4_setup_sequence(struct nfs_client *clp, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, @@ -227,6 +228,11 @@ static inline int nfs4_setup_sequence(struct nfs_client *clp, { return 0; } + +static inline int nfs4_init_session(struct nfs_server *server) +{ + return 0; +} #endif /* CONFIG_NFS_V4_1 */ extern struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[]; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ff0c080db59..df24f67bca6 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2040,15 +2040,9 @@ static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, .rpc_argp = &args, .rpc_resp = &res, }; - int status; nfs_fattr_init(info->fattr); - status = nfs4_recover_expired_lease(server); - if (!status) - status = nfs4_check_client_ready(server->nfs_client); - if (!status) - status = nfs4_call_sync(server, &msg, &args, &res, 0); - return status; + return nfs4_call_sync(server, &msg, &args, &res, 0); } static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, @@ -4793,6 +4787,22 @@ int nfs4_proc_destroy_session(struct nfs4_session *session) return status; } +int nfs4_init_session(struct nfs_server *server) +{ + struct nfs_client *clp = server->nfs_client; + int ret; + + if (!nfs4_has_session(clp)) + return 0; + + clp->cl_session->fc_attrs.max_rqst_sz = server->wsize; + clp->cl_session->fc_attrs.max_resp_sz = server->rsize; + ret = nfs4_recover_expired_lease(server); + if (!ret) + ret = nfs4_check_client_ready(clp); + return ret; +} + /* * Renew the cl_session lease. */ -- cgit v1.2.3 From d953126a28f97ec965d23c69fd5795854c048f30 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 21 Jul 2009 19:22:38 -0400 Subject: NFSv4: Fix a problem whereby a buggy server can oops the kernel We just had a case in which a buggy server occasionally returns the wrong attributes during an OPEN call. While the client does catch this sort of condition in nfs4_open_done(), and causes the nfs4_atomic_open() to return -EISDIR, the logic in nfs_atomic_lookup() is broken, since it causes a fallback to an ordinary lookup instead of just returning the error. When the buggy server then returns a regular file for the fallback lookup, the VFS allows the open, and bad things start to happen, since the open file doesn't have any associated NFSv4 state. The fix is firstly to return the EISDIR/ENOTDIR errors immediately, and secondly to ensure that we are always careful when dereferencing the nfs_open_context state pointer. 
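Reduced to its control flow, the guard logic looks like this (hypothetical types; the -1 stands in for an errno such as ENOLCK): query and unlock requests can succeed trivially without state, while taking a new lock must fail cleanly instead of dereferencing NULL.

struct lock_ctx;

static int query_lock(struct lock_ctx *s)   { (void)s; return 0; }
static int release_lock(struct lock_ctx *s) { (void)s; return 0; }

static int do_lock(struct lock_ctx *state, int is_query, int is_unlock)
{
        if (is_query)
                return state ? query_lock(state) : 0;   /* no state: no lock */
        if (is_unlock)
                return state ? release_lock(state) : 0; /* nothing to undo */
        if (!state)
                return -1;      /* a new lock needs valid state */
        /* ... safe to dereference state from here on ... */
        return 0;
}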
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 2 +- fs/nfs/nfs4proc.c | 16 ++++++++++++---- 2 files changed, 13 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 38d42c29fb9..32062c33c85 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1025,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry res = NULL; goto out; /* This turned out not to be a regular file */ - case -EISDIR: case -ENOTDIR: goto no_open; case -ELOOP: if (!(nd->intent.open.flags & O_NOFOLLOW)) goto no_open; + /* case -EISDIR: */ /* case -EINVAL: */ default: goto out; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index df24f67bca6..6917311f201 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -4093,15 +4093,23 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) if (request->fl_start < 0 || request->fl_end < 0) return -EINVAL; - if (IS_GETLK(cmd)) - return nfs4_proc_getlk(state, F_GETLK, request); + if (IS_GETLK(cmd)) { + if (state != NULL) + return nfs4_proc_getlk(state, F_GETLK, request); + return 0; + } if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) return -EINVAL; - if (request->fl_type == F_UNLCK) - return nfs4_proc_unlck(state, cmd, request); + if (request->fl_type == F_UNLCK) { + if (state != NULL) + return nfs4_proc_unlck(state, cmd, request); + return 0; + } + if (state == NULL) + return -ENOLCK; do { status = nfs4_proc_setlk(state, cmd, request); if ((status != -EAGAIN) || IS_SETLK(cmd)) -- cgit v1.2.3 From 1bec1aed1e7e632b3cc43b6807c2b4dcd1572e28 Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Wed, 22 Jul 2009 09:59:00 -0400 Subject: Btrfs: fix definition of struct btrfs_extent_inline_ref Use __le64 instead of u64 in the on-disk structure definition. Signed-off-by: Yan Zheng Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index a404ecc53eb..da0763135bf 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -483,7 +483,7 @@ struct btrfs_shared_data_ref { struct btrfs_extent_inline_ref { u8 type; - u64 offset; + __le64 offset; } __attribute__ ((__packed__)); /* old style backrefs item */ -- cgit v1.2.3 From bf1fb512a58d7aeb41aaa40d6d2d2d29e08e506a Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Wed, 22 Jul 2009 09:59:00 -0400 Subject: Btrfs: properly update space information after shrinking device. Change 'goto done' to 'break' for the case where all device extents have been freed, so that the code that updates the space information will be executed.
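In miniature (a sketch with stand-in helpers, not the btrfs code): break leaves the loop but still runs the accounting that follows it, while a goto past the loop would skip that accounting entirely.

static int extent_already_gone(int i) { return i >= 3; }  /* stand-in */
static void free_extent(int i)        { (void)i; }

static int shrink(int nr_extents, long *space_info)
{
        int i, freed = 0;

        for (i = 0; i < nr_extents; i++) {
                if (extent_already_gone(i))
                        break;  /* a 'goto' past the loop would skip the
                                 * space accounting below */
                free_extent(i);
                freed++;
        }
        *space_info -= freed;   /* runs on early exit and normal exit */
        return freed;
}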
Signed-off-by: Yan Zheng Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 3ab80e9cd76..f057730a72b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2007,7 +2007,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) goto done; if (ret) { ret = 0; - goto done; + break; } l = path->nodes[0]; @@ -2015,7 +2015,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) btrfs_item_key_to_cpu(l, &key, path->slots[0]); if (key.objectid != device->devid) - goto done; + break; dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); length = btrfs_dev_extent_length(l, dev_extent); -- cgit v1.2.3 From e457afec60fdbd86b963d36f4a8a9285088c6043 Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Wed, 22 Jul 2009 09:59:00 -0400 Subject: Btrfs: fix double increment of path->slots[0] in btrfs_next_leaf If 1 is returned by btrfs_search_slot, the path already points to the first item with 'key > searching key'. So increasing path->slots[0] by one is superfluous in that case. Signed-off-by: Yan Zheng Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 60a45f3a4e9..7bb66c65ddf 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -4146,7 +4146,8 @@ again: * advance the path if there are now more items available. */ if (nritems > 0 && path->slots[0] < nritems - 1) { - path->slots[0]++; + if (ret == 0) + path->slots[0]++; ret = 0; goto done; } -- cgit v1.2.3 From 33c66f430bfa3a033e70470e4c93f967156b696d Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Wed, 22 Jul 2009 09:59:00 -0400 Subject: Btrfs: fix locking issue in btrfs_find_next_key When walking up the tree, btrfs_find_next_key assumes the upper level tree block is properly locked. This isn't always true even when path->keep_locks is 1. This is because btrfs_find_next_key may advance path->slots[] several times instead of only once. When 'path->slots[level] >= btrfs_header_nritems(path->nodes[level])' is found, we can't guarantee the original value of 'path->slots[level]' is 'btrfs_header_nritems(path->nodes[level]) - 1'. If it's not, the tree block at 'level + 1' isn't locked. This patch fixes the issue by explicitly checking the locking state, re-searching the tree if it's not locked.
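The shape of the fix, in a reduced sketch (hypothetical names; the helpers stand in for the bh-state lock checks and btrfs_search_slot): verify the locking assumption instead of relying on it, and fall back to a fresh top-down search keyed by the last item seen when it does not hold.

struct node;

/* Assumed helpers; declarations only for the sketch. */
int node_is_locked(struct node *parent);
int search_from_root(long last_key, struct node **out);

static int step_up(struct node *parent, long last_key, struct node **out)
{
        if (node_is_locked(parent)) {
                *out = parent;  /* fast path: the assumption holds */
                return 0;
        }
        /* Slow path: release everything and redo a top-down search from
         * the last key seen, so the whole path is locked consistently. */
        return search_from_root(last_key, out);
}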
Signed-off-by: Yan Zheng Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 95 +++++++++++++++++++++++++++++++++------------------ fs/btrfs/relocation.c | 3 ++ 2 files changed, 65 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 7bb66c65ddf..fdd423a550d 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1701,6 +1701,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root struct extent_buffer *b; int slot; int ret; + int err; int level; int lowest_unlock = 1; u8 lowest_level = 0; @@ -1737,8 +1738,6 @@ again: p->locks[level] = 1; if (cow) { - int wret; - /* * if we don't really need to cow this block * then we don't want to set the path blocking, @@ -1749,12 +1748,12 @@ again: btrfs_set_path_blocking(p); - wret = btrfs_cow_block(trans, root, b, - p->nodes[level + 1], - p->slots[level + 1], &b); - if (wret) { + err = btrfs_cow_block(trans, root, b, + p->nodes[level + 1], + p->slots[level + 1], &b); + if (err) { free_extent_buffer(b); - ret = wret; + ret = err; goto done; } } @@ -1793,41 +1792,45 @@ cow_done: ret = bin_search(b, key, level, &slot); if (level != 0) { - if (ret && slot > 0) + int dec = 0; + if (ret && slot > 0) { + dec = 1; slot -= 1; + } p->slots[level] = slot; - ret = setup_nodes_for_search(trans, root, p, b, level, + err = setup_nodes_for_search(trans, root, p, b, level, ins_len); - if (ret == -EAGAIN) + if (err == -EAGAIN) goto again; - else if (ret) + if (err) { + ret = err; goto done; + } b = p->nodes[level]; slot = p->slots[level]; unlock_up(p, level, lowest_unlock); - /* this is only true while dropping a snapshot */ if (level == lowest_level) { - ret = 0; + if (dec) + p->slots[level]++; goto done; } - ret = read_block_for_search(trans, root, p, + err = read_block_for_search(trans, root, p, &b, level, slot, key); - if (ret == -EAGAIN) + if (err == -EAGAIN) goto again; - - if (ret == -EIO) + if (err) { + ret = err; goto done; + } if (!p->skip_locking) { - int lret; - btrfs_clear_path_blocking(p, NULL); - lret = btrfs_try_spin_lock(b); + err = btrfs_try_spin_lock(b); - if (!lret) { + if (!err) { btrfs_set_path_blocking(p); btrfs_tree_lock(b); btrfs_clear_path_blocking(p, b); @@ -1837,16 +1840,14 @@ cow_done: p->slots[level] = slot; if (ins_len > 0 && btrfs_leaf_free_space(root, b) < ins_len) { - int sret; - btrfs_set_path_blocking(p); - sret = split_leaf(trans, root, key, - p, ins_len, ret == 0); + err = split_leaf(trans, root, key, + p, ins_len, ret == 0); btrfs_clear_path_blocking(p, NULL); - BUG_ON(sret > 0); - if (sret) { - ret = sret; + BUG_ON(err > 0); + if (err) { + ret = err; goto done; } } @@ -4042,10 +4043,9 @@ out: * calling this function. 
*/ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *key, int lowest_level, + struct btrfs_key *key, int level, int cache_only, u64 min_trans) { - int level = lowest_level; int slot; struct extent_buffer *c; @@ -4058,11 +4058,40 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, c = path->nodes[level]; next: if (slot >= btrfs_header_nritems(c)) { - level++; - if (level == BTRFS_MAX_LEVEL) + int ret; + int orig_lowest; + struct btrfs_key cur_key; + if (level + 1 >= BTRFS_MAX_LEVEL || + !path->nodes[level + 1]) return 1; - continue; + + if (path->locks[level + 1]) { + level++; + continue; + } + + slot = btrfs_header_nritems(c) - 1; + if (level == 0) + btrfs_item_key_to_cpu(c, &cur_key, slot); + else + btrfs_node_key_to_cpu(c, &cur_key, slot); + + orig_lowest = path->lowest_level; + btrfs_release_path(root, path); + path->lowest_level = level; + ret = btrfs_search_slot(NULL, root, &cur_key, path, + 0, 0); + path->lowest_level = orig_lowest; + if (ret < 0) + return ret; + + c = path->nodes[level]; + slot = path->slots[level]; + if (ret == 0) + slot++; + goto next; } + if (level == 0) btrfs_item_key_to_cpu(c, key, slot); else { diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 00839793477..e71264d1c2c 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -670,6 +670,8 @@ again: err = ret; goto out; } + if (ret > 0 && path2->slots[level] > 0) + path2->slots[level]--; eb = path2->nodes[level]; WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) != @@ -1609,6 +1611,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, BUG_ON(level == 0); path->lowest_level = level; ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0); + path->lowest_level = 0; if (ret < 0) { btrfs_free_path(path); return ret; -- cgit v1.2.3 From 4a8c9a62d7f7f058eed4b8a6f2c890a887778093 Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Wed, 22 Jul 2009 10:07:05 -0400 Subject: Btrfs: make sure all dirty blocks are written at commit time Writing dirty block groups may allocate new blocks, and so may add new delayed back refs. btrfs_run_delayed_refs may make some block groups dirty. commit_cowonly_roots does not handle the recursion properly, and some dirty blocks can be left unwritten at commit time. This patch moves btrfs_run_delayed_refs into the loop that writes dirty block groups, and makes the code not break out of the loop until there are no dirty block groups or delayed back refs.
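The structure of the fix is the classic work-until-quiescent loop (a sketch with stand-in helpers, not the btrfs code): because each phase can generate work for the other, neither a single pass nor a fixed phase order is enough.

/* Stand-in predicates and phases; in btrfs these correspond to the
 * dirty block group list and the delayed ref queue. */
int have_dirty_groups(void);
int have_delayed_refs(void);
void run_delayed_refs(void);
void write_dirty_groups(void);

static void commit_until_clean(void)
{
        while (have_dirty_groups() || have_delayed_refs()) {
                run_delayed_refs();     /* may dirty more block groups */
                write_dirty_groups();   /* may queue more delayed refs */
        }
}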
Signed-off-by: Yan Zheng Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 70 +++++++++++++++++++++++++++++--------------------- fs/btrfs/transaction.c | 9 +------ 2 files changed, 42 insertions(+), 37 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a5aca3997d4..62a332d34fd 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2387,13 +2387,29 @@ fail: } +static struct btrfs_block_group_cache * +next_block_group(struct btrfs_root *root, + struct btrfs_block_group_cache *cache) +{ + struct rb_node *node; + spin_lock(&root->fs_info->block_group_cache_lock); + node = rb_next(&cache->cache_node); + btrfs_put_block_group(cache); + if (node) { + cache = rb_entry(node, struct btrfs_block_group_cache, + cache_node); + atomic_inc(&cache->count); + } else + cache = NULL; + spin_unlock(&root->fs_info->block_group_cache_lock); + return cache; +} + int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - struct btrfs_block_group_cache *cache, *entry; - struct rb_node *n; + struct btrfs_block_group_cache *cache; int err = 0; - int werr = 0; struct btrfs_path *path; u64 last = 0; @@ -2402,39 +2418,35 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, return -ENOMEM; while (1) { - cache = NULL; - spin_lock(&root->fs_info->block_group_cache_lock); - for (n = rb_first(&root->fs_info->block_group_cache_tree); - n; n = rb_next(n)) { - entry = rb_entry(n, struct btrfs_block_group_cache, - cache_node); - if (entry->dirty) { - cache = entry; - break; - } + if (last == 0) { + err = btrfs_run_delayed_refs(trans, root, + (unsigned long)-1); + BUG_ON(err); } - spin_unlock(&root->fs_info->block_group_cache_lock); - if (!cache) - break; + cache = btrfs_lookup_first_block_group(root->fs_info, last); + while (cache) { + if (cache->dirty) + break; + cache = next_block_group(root, cache); + } + if (!cache) { + if (last == 0) + break; + last = 0; + continue; + } cache->dirty = 0; - last += cache->key.offset; + last = cache->key.objectid + cache->key.offset; - err = write_one_cache_group(trans, root, - path, cache); - /* - * if we fail to write the cache group, we want - * to keep it marked dirty in hopes that a later - * write will work - */ - if (err) { - werr = err; - continue; - } + err = write_one_cache_group(trans, root, path, cache); + BUG_ON(err); + btrfs_put_block_group(cache); } + btrfs_free_path(path); - return werr; + return 0; } int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 2dbf1c1f56e..81f7124c305 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -444,9 +444,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, btrfs_write_dirty_block_groups(trans, root); - ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); - BUG_ON(ret); - while (1) { old_root_bytenr = btrfs_root_bytenr(&root->root_item); if (old_root_bytenr == root->node->start) @@ -457,9 +454,8 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, &root->root_key, &root->root_item); BUG_ON(ret); - btrfs_write_dirty_block_groups(trans, root); - ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); + ret = btrfs_write_dirty_block_groups(trans, root); BUG_ON(ret); } free_extent_buffer(root->commit_root); @@ -495,9 +491,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, root = list_entry(next, struct btrfs_root, dirty_list); update_cowonly_root(trans, 
root); - - ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); - BUG_ON(ret); } return 0; } -- cgit v1.2.3 From 023d43c7b5a23a81fe8afa9f37296f8ed4be11fb Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 21 Jul 2009 10:09:23 +0200 Subject: lockdep: Fix lockdep annotation for pipe_double_lock() The presumed use of the pipe_double_lock() routine is to lock 2 locks in a deadlock free way by ordering the locks by their address. However it fails to keep the specified lock classes in order and explicitly annotates a deadlock. Rectify this. Signed-off-by: Peter Zijlstra Acked-by: Miklos Szeredi LKML-Reference: <1248163763.15751.11098.camel@twins> --- fs/pipe.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/pipe.c b/fs/pipe.c index f7dd21ad85a..52c41511483 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -68,8 +68,8 @@ void pipe_double_lock(struct pipe_inode_info *pipe1, pipe_lock_nested(pipe1, I_MUTEX_PARENT); pipe_lock_nested(pipe2, I_MUTEX_CHILD); } else { - pipe_lock_nested(pipe2, I_MUTEX_CHILD); - pipe_lock_nested(pipe1, I_MUTEX_PARENT); + pipe_lock_nested(pipe2, I_MUTEX_PARENT); + pipe_lock_nested(pipe1, I_MUTEX_CHILD); } } -- cgit v1.2.3 From 29c5e8ce01f9dad7e24b99c21e4f836d6b0289e0 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Wed, 22 Jul 2009 16:49:00 -0400 Subject: Btrfs: convert nested spin_lock_irqsave to spin_lock If spin_lock_irqsave is called twice in a row with the same second argument, the interrupt state at the point of the second call overwrites the value saved by the first call. Indeed, the second call does not need to save the interrupt state, so it is changed to a simple spin_lock. Signed-off-by: Julia Lawall Signed-off-by: Chris Mason --- fs/btrfs/async-thread.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 6e4f6c50a12..019e8af449a 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -424,11 +424,11 @@ int btrfs_requeue_work(struct btrfs_work *work) * list */ if (worker->idle) { - spin_lock_irqsave(&worker->workers->lock, flags); + spin_lock(&worker->workers->lock); worker->idle = 0; list_move_tail(&worker->worker_list, &worker->workers->worker_list); - spin_unlock_irqrestore(&worker->workers->lock, flags); + spin_unlock(&worker->workers->lock); } if (!worker->working) { wake = 1; -- cgit v1.2.3 From 3acada49c2794c5aac21849e2ea05790c6dd2faa Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Wed, 22 Jul 2009 16:49:01 -0400 Subject: Btrfs: Remove broken sanity check from btrfs_rmap_block() It was never actually doing anything anyway (see the loop condition), and it would be difficult to make it work for RAID[56]. Even if it was actually working, it's checking for the wrong thing anyway. Instead of checking whether we list a block which _doesn't_ land at the relevant physical location, it should be checking that we _have_ listed all the logical blocks which refer to the required physical location on all devices. This function is only called from remove_sb_from_cache() to ensure that we reserve the logical blocks which would reside at the same physical location as the superblock copies. So listing more blocks than we need is actually OK. With RAID[56] we're going to throw away an entire stripe for each block we have to ignore, so we _are_ going to list blocks other than the ones which actually contain the superblock. 
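For context, the only caller is remove_sb_from_cache(), which just knocks every logical range returned by btrfs_rmap_block() out of the free space cache, so returning a superset of the necessary blocks merely over-reserves. Condensed from that caller (declarations shown; cache and root come from the surrounding context):

	u64 bytenr;
	u64 *logical;
	int i, nr, ret, stripe_len;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr, 0,
				       &logical, &nr, &stripe_len);
		BUG_ON(ret);
		/* extra entries only remove more free space, never corrupt */
		while (nr--)
			btrfs_remove_free_space(cache, logical[nr], stripe_len);
		kfree(logical);
	}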
Signed-off-by: David Woodhouse Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 20 -------------------- 1 file changed, 20 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index f057730a72b..55c37276a29 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2795,26 +2795,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, } } - for (i = 0; i > nr; i++) { - struct btrfs_multi_bio *multi; - struct btrfs_bio_stripe *stripe; - int ret; - - length = 1; - ret = btrfs_map_block(map_tree, WRITE, buf[i], - &length, &multi, 0); - BUG_ON(ret); - - stripe = multi->stripes; - for (j = 0; j < multi->num_stripes; j++) { - if (stripe->physical >= physical && - physical < stripe->physical + length) - break; - } - BUG_ON(j >= multi->num_stripes); - kfree(multi); - } - *logical = buf; *naddrs = nr; *stripe_len = map->stripe_len; -- cgit v1.2.3 From 33c17ad5717c887568c1de61f15e5d58ed66d189 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Wed, 22 Jul 2009 16:49:01 -0400 Subject: Btrfs: adjust NULL test Move the call to BUG_ON to before the dereference of the tested value. Signed-off-by: Julia Lawall Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a48c084f6d3..3ea827ddf0f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2607,8 +2607,8 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, if (root->ref_cows) btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); path = btrfs_alloc_path(); - path->reada = -1; BUG_ON(!path); + path->reada = -1; /* FIXME, add redo link to tree so we don't leak on crash */ key.objectid = inode->i_ino; -- cgit v1.2.3 From c271b492419a18908ba19ee02b231fb305a27023 Mon Sep 17 00:00:00 2001 From: Daniel Cadete Date: Wed, 22 Jul 2009 16:52:13 -0400 Subject: Btrfs: remove redundant btrfs_header_level calls This removes the repeated calls to btrfs_header_level. One call of btrfs_header_level(c) is enough. Signed-off-by: Daniel Cadete Signed-off-by: Chris Mason --- fs/btrfs/print-tree.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 6d6523da0a3..0d126be22b6 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -309,7 +309,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c) } printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n", (unsigned long long)btrfs_header_bytenr(c), - btrfs_header_level(c), nr, + level, nr, (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr); for (i = 0; i < nr; i++) { btrfs_node_key_to_cpu(c, &key, i); @@ -326,10 +326,10 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c) btrfs_level_size(root, level - 1), btrfs_node_ptr_generation(c, i)); if (btrfs_is_leaf(next) && - btrfs_header_level(c) != 1) + level != 1) BUG(); if (btrfs_header_level(next) != - btrfs_header_level(c) - 1) + level - 1) BUG(); btrfs_print_tree(root, next); free_extent_buffer(next); -- cgit v1.2.3 From 83121942b28daffc9526b14b7843d8cdbd3db641 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Wed, 22 Jul 2009 16:52:13 -0400 Subject: Btrfs: Fix crash on read failures at mount If the tree roots hit read errors during mount, btrfs is not properly erroring out. We need to check the uptodate bits after reading in the tree root node.
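The pattern of the fix, in general form: read_tree_block() can hand back a non-NULL extent buffer even when the underlying I/O failed, so a NULL check alone is not enough and the uptodate bit has to be tested before the root is used. A condensed sketch (the label name is illustrative):

	eb = read_tree_block(root, bytenr, blocksize, generation);
	if (!eb)
		goto fail;	/* allocation failed outright */
	if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
		goto fail;	/* buffer exists, but the read did not complete cleanly */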
Signed-off-by: David Woodhouse Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0d50d49d990..55d9d188e69 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1783,6 +1783,11 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_super_chunk_root(disk_super), blocksize, generation); BUG_ON(!chunk_root->node); + if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) { + printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n", + sb->s_id); + goto fail_chunk_root; + } btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); chunk_root->commit_root = btrfs_root_node(chunk_root); @@ -1810,6 +1815,11 @@ struct btrfs_root *open_ctree(struct super_block *sb, blocksize, generation); if (!tree_root->node) goto fail_chunk_root; + if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) { + printk(KERN_WARNING "btrfs: failed to read tree root on %s\n", + sb->s_id); + goto fail_tree_root; + } btrfs_set_root_node(&tree_root->root_item, tree_root->node); tree_root->commit_root = btrfs_root_node(tree_root); -- cgit v1.2.3 From ce6e7fcd43aab1f77e56aa36936dd7d2d05a1ffa Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 22 Jul 2009 15:08:58 -0400 Subject: cifs: disable serverino if server doesn't support it This fixes a recent regression when dealing with older servers; the bug was introduced when we made serverino the default. When the server can't provide inode numbers, disable serverino for the mount. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/inode.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 18afe57b246..b6a47b32f21 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -513,9 +513,12 @@ int cifs_get_inode_info(struct inode **pinode, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc1) { - /* BB EOPNOSUPP disable SERVER_INUM? */ cFYI(1, ("GetSrvInodeNum rc %d", rc1)); fattr.cf_uniqueid = iunique(sb, ROOT_I); + /* disable serverino if call not supported */ + if (rc1 == -EINVAL) + cifs_sb->mnt_cifs_flags &= + ~CIFS_MOUNT_SERVER_INUM; } } else { fattr.cf_uniqueid = iunique(sb, ROOT_I); -- cgit v1.2.3 From 03aa3a49ad3592a9e4e1ab19c6da3e852288caf1 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 21 Jul 2009 19:42:03 -0400 Subject: cifs: fix sb->s_maxbytes so that it casts properly to a signed value This off-by-one bug causes sendfile() to not work properly. When a task calls sendfile() on a file on a CIFS filesystem, the syscall returns -1 and sets errno to EOVERFLOW. do_sendfile uses s_maxbytes to verify the returned offset of the file. The problem there is that this value is cast to a signed value (loff_t). When this is done on the s_maxbytes value that cifs uses, it becomes negative and the comparisons against it fail. Even though s_maxbytes is an unsigned value, it seems that it's not OK to set it in such a way that it'll end up negative when it's cast to a signed value. These casts happen in other codepaths besides sendfile too, but the VFS is a little hard to follow in this area and I can't be sure if there are other bugs that this will fix. It's not clear to me why s_maxbytes isn't just declared as loff_t in the first place, but either way we still need to fix these values to make sendfile work properly. This is also an opportunity to replace the magic bit-shift values here with the standard #defines for this.
This fixes the reproducer program I have that does a sendfile and will probably also fix the situation where apache is serving from a CIFS share. Acked-by: Johannes Weiner Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 9bb5c875073..fc44d316d0b 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2452,10 +2452,10 @@ try_mount_again: tcon->local_lease = volume_info->local_lease; } if (pSesInfo) { - if (pSesInfo->capabilities & CAP_LARGE_FILES) { - sb->s_maxbytes = (u64) 1 << 63; - } else - sb->s_maxbytes = (u64) 1 << 31; /* 2 GB */ + if (pSesInfo->capabilities & CAP_LARGE_FILES) + sb->s_maxbytes = MAX_LFS_FILESIZE; + else + sb->s_maxbytes = MAX_NON_LFS; } /* BB FIXME fix time_gran to be larger for LANMAN sessions */ -- cgit v1.2.3 From f1230c97978f52268d8c66e6f88e54c3d2092a75 Mon Sep 17 00:00:00 2001 From: Steve French Date: Wed, 22 Jul 2009 23:13:01 +0000 Subject: [CIFS] fix sparse warning Signed-off-by: Steve French --- fs/cifs/inode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index b6a47b32f21..82d83839655 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -212,7 +212,7 @@ cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info, * junction to the new submount (ie to setup the fake directory * which represents a DFS referral). */ -void +static void cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb) { struct cifs_sb_info *cifs_sb = CIFS_SB(sb); @@ -388,7 +388,7 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path, } /* Fill a cifs_fattr struct with info from FILE_ALL_INFO */ -void +static void cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info, struct cifs_sb_info *cifs_sb, bool adjust_tz) { -- cgit v1.2.3 From 4a19fb11a90fdbbcb3bc02effa036230d035ca28 Mon Sep 17 00:00:00 2001 From: Stefan Bader Date: Thu, 23 Jul 2009 11:26:05 +0200 Subject: jfs: Fix early release of acl in jfs_get_acl BugLink: http://bugs.launchpad.net/ubuntu/+bug/396780 Commit 073aaa1b142461d91f83da66db1184d7c1b1edea "helpers for acl caching + switch to those" introduced new helper functions for acl handling but seems to have introduced a regression for jfs as the acl is released before returning it to the caller, instead of leaving this for the caller to do. This causes the acl object to be used after freeing it, leading to kernel panics in completely different places. Thanks to Christophe Dumez for reporting and bisecting into this. 
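The ownership rule being restored: the reference returned by jfs_get_acl() belongs to the caller (set_cached_acl() takes its own reference for the cache), so releasing the ACL before returning leaves the caller holding a pointer it will later use and release again. A sketch of the expected caller side (illustrative, not taken from the patch):

	struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
	if (IS_ERR(acl))
		return PTR_ERR(acl);
	/* ... check permission bits against acl ... */
	posix_acl_release(acl);	/* the caller, not jfs_get_acl(), drops it */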
Reported-by: Christophe Dumez Tested-by: Christophe Dumez Signed-off-by: Stefan Bader Acked-by: Andy Whitcroft Signed-off-by: Dave Kleikamp --- fs/jfs/acl.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c index 91fa3ad6e8c..a29c7c3e3fb 100644 --- a/fs/jfs/acl.c +++ b/fs/jfs/acl.c @@ -67,10 +67,8 @@ static struct posix_acl *jfs_get_acl(struct inode *inode, int type) acl = posix_acl_from_xattr(value, size); } kfree(value); - if (!IS_ERR(acl)) { + if (!IS_ERR(acl)) set_cached_acl(inode, type, acl); - posix_acl_release(acl); - } return acl; } -- cgit v1.2.3 From 963030817060e4f109be1993b9ae8f81dbf5e11a Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 13 Jul 2009 21:29:25 -0400 Subject: Btrfs: use hybrid extents+bitmap rb tree for free space Currently btrfs has a problem where it can use a ridiculous amount of RAM simply tracking free space. As free space gets fragmented, we end up with thousands of entries on an rb-tree per block group, which usually spans 1 gig of area. Since we currently don't ever flush free space cache back to disk, this gets to be a bit unwieldy on large fs's with lots of fragmentation. This patch solves this problem by using PAGE_SIZE bitmaps for parts of the free space cache. Initially we calculate a threshold of extent entries we can handle, which is however many extent entries we can cram into 16k of ram. The maximum amount of RAM that should ever be used to track 1 gigabyte of diskspace will be 32k of RAM, which scales much better than we did before. Once we pass the extent threshold, we start adding bitmaps and using those instead for tracking the free space. This patch also makes it so that any free space that's less than 4 * sectorsize gets put into a bitmap. This is nice since we try and allocate out of the front of a block group, so if the front of a block group is heavily fragmented and then has a huge chunk of free space at the end, we go ahead and add the fragmented areas to bitmaps and use a normal extent entry to track the big chunk at the back of the block group. I've also taken the opportunity to revamp how we search for free space. Previously we indexed free space via an offset indexed rb tree and a bytes indexed rb tree. I've dropped the bytes indexed rb tree and use only the offset indexed rb tree. This cuts the number of tree operations we were doing previously down by half, and gives us a little bit of a better allocation pattern since we will always start from a specific offset and search forward from there, instead of searching for the size we need and trying to get it as close as possible to the offset we want. I've given this a healthy amount of testing, with the pre-new-format code as well as the post-new-format code. I've booted up my fedora box which is installed on btrfs with this patch and ran with it for a few days without issues. I've not seen any performance regressions in any of my tests. Since the last patch Yan Zheng fixed a problem where we could have overlapping entries, so updating their offset inline would cause problems.
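To make the memory bound concrete, here is the arithmetic worked through under illustrative assumptions (4K pages and a 4K sectorsize; the entry size at the end is a round figure, not measured):

	u64 bytes_per_bitmap, bitmaps_per_gig;
	int extents_thresh;

	/* one bitmap page: 4096 bytes = 32768 bits, one bit per 4K sector */
	bytes_per_bitmap = 32768ULL * 4096;		/* covers 128M of disk */
	bitmaps_per_gig = (1ULL << 30) / bytes_per_bitmap;	/* 8 pages = 32K of RAM */
	/* extent entries get half of the 32K budget, as in the patch: */
	extents_thresh = ((1024 * 32) / 2) / sizeof(struct btrfs_free_space);
							/* e.g. 256 entries if one
							   entry is 64 bytes */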
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 8 +- fs/btrfs/extent-tree.c | 25 +- fs/btrfs/free-space-cache.c | 1001 ++++++++++++++++++++++++++++++++++--------- fs/btrfs/free-space-cache.h | 8 + 4 files changed, 826 insertions(+), 216 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index da0763135bf..0cbf3491bb7 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -709,6 +709,9 @@ struct btrfs_free_cluster { /* first extent starting offset */ u64 window_start; + /* if this cluster simply points at a bitmap in the block group */ + bool points_to_bitmap; + struct btrfs_block_group_cache *block_group; /* * when a cluster is allocated from a block group, we put the @@ -726,6 +729,10 @@ struct btrfs_block_group_cache { u64 pinned; u64 reserved; u64 flags; + u64 sectorsize; + int extents_thresh; + int free_extents; + int total_bitmaps; int cached; int ro; int dirty; @@ -734,7 +741,6 @@ struct btrfs_block_group_cache { /* free space cache stuff */ spinlock_t tree_lock; - struct rb_root free_space_bytes; struct rb_root free_space_offset; /* block group cache stuff */ diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 62a332d34fd..98697be6bdd 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3649,7 +3649,6 @@ refill_cluster: goto loop; checks: search_start = stripe_align(root, offset); - /* move on to the next group */ if (search_start + num_bytes >= search_end) { btrfs_add_free_space(block_group, offset, num_bytes); @@ -7040,6 +7039,16 @@ int btrfs_read_block_groups(struct btrfs_root *root) mutex_init(&cache->cache_mutex); INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); + cache->sectorsize = root->sectorsize; + + /* + * we only want to have 32k of ram per block group for keeping + * track of free space, and if we pass 1/2 of that we want to + * start converting things over to using bitmaps + */ + cache->extents_thresh = ((1024 * 32) / 2) / + sizeof(struct btrfs_free_space); + read_extent_buffer(leaf, &cache->item, btrfs_item_ptr_offset(leaf, path->slots[0]), sizeof(cache->item)); @@ -7091,6 +7100,15 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, cache->key.objectid = chunk_offset; cache->key.offset = size; cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; + cache->sectorsize = root->sectorsize; + + /* + * we only want to have 32k of ram per block group for keeping track + * of free space, and if we pass 1/2 of that we want to start + * converting things over to using bitmaps + */ + cache->extents_thresh = ((1024 * 32) / 2) / + sizeof(struct btrfs_free_space); atomic_set(&cache->count, 1); spin_lock_init(&cache->lock); spin_lock_init(&cache->tree_lock); @@ -7103,6 +7121,11 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, cache->flags = type; btrfs_set_block_group_flags(&cache->item, type); + cache->cached = 1; + ret = btrfs_add_free_space(cache, chunk_offset, size); + BUG_ON(ret); + remove_sb_from_cache(root, cache); + ret = update_space_info(root->fs_info, cache->flags, size, bytes_used, &cache->space_info); BUG_ON(ret); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 4538e48581a..ab8cad8b46c 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -16,45 +16,46 @@ * Boston, MA 021110-1307, USA. 
*/ +#include #include +#include #include "ctree.h" #include "free-space-cache.h" #include "transaction.h" -struct btrfs_free_space { - struct rb_node bytes_index; - struct rb_node offset_index; - u64 offset; - u64 bytes; -}; +#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) +#define MAX_CACHE_BYTES_PER_GIG (32 * 1024) -static int tree_insert_offset(struct rb_root *root, u64 offset, - struct rb_node *node) +static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize, + u64 offset) { - struct rb_node **p = &root->rb_node; - struct rb_node *parent = NULL; - struct btrfs_free_space *info; + BUG_ON(offset < bitmap_start); + offset -= bitmap_start; + return (unsigned long)(div64_u64(offset, sectorsize)); +} - while (*p) { - parent = *p; - info = rb_entry(parent, struct btrfs_free_space, offset_index); +static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize) +{ + return (unsigned long)(div64_u64(bytes, sectorsize)); +} - if (offset < info->offset) - p = &(*p)->rb_left; - else if (offset > info->offset) - p = &(*p)->rb_right; - else - return -EEXIST; - } +static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group, + u64 offset) +{ + u64 bitmap_start; + u64 bytes_per_bitmap; - rb_link_node(node, parent, p); - rb_insert_color(node, root); + bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize; + bitmap_start = offset - block_group->key.objectid; + bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); + bitmap_start *= bytes_per_bitmap; + bitmap_start += block_group->key.objectid; - return 0; + return bitmap_start; } -static int tree_insert_bytes(struct rb_root *root, u64 bytes, - struct rb_node *node) +static int tree_insert_offset(struct rb_root *root, u64 offset, + struct rb_node *node, int bitmap) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; @@ -62,12 +63,34 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes, while (*p) { parent = *p; - info = rb_entry(parent, struct btrfs_free_space, bytes_index); + info = rb_entry(parent, struct btrfs_free_space, offset_index); - if (bytes < info->bytes) + if (offset < info->offset) { p = &(*p)->rb_left; - else + } else if (offset > info->offset) { p = &(*p)->rb_right; + } else { + /* + * we could have a bitmap entry and an extent entry + * share the same offset. If this is the case, we want + * the extent entry to always be found first if we do a + * linear search through the tree, since we want to have + * the quickest allocation time, and allocating from an + * extent is faster than allocating from a bitmap. So + * if we're inserting a bitmap and we find an entry at + * this offset, we want to go right, or after this entry + * logically. If we are inserting an extent and we've + * found a bitmap, we want to go left, or before + * logically. + */ + if (bitmap) { + WARN_ON(info->bitmap); + p = &(*p)->rb_right; + } else { + WARN_ON(!info->bitmap); + p = &(*p)->rb_left; + } + } } rb_link_node(node, parent, p); @@ -79,110 +102,142 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes, /* * searches the tree for the given offset. * - * fuzzy == 1: this is used for allocations where we are given a hint of where - * to look for free space. Because the hint may not be completely on an offset - * mark, or the hint may no longer point to free space we need to fudge our - * results a bit. So we look for free space starting at or after offset with at - * least bytes size. We prefer to find as close to the given offset as we can. 
- * Also if the offset is within a free space range, then we will return the free - * space that contains the given offset, which means we can return a free space - * chunk with an offset before the provided offset. - * - * fuzzy == 0: this is just a normal tree search. Give us the free space that - * starts at the given offset which is at least bytes size, and if its not there - * return NULL. + * fuzzy - If this is set, then we are trying to make an allocation, and we just + * want a section that has at least bytes size and comes at or after the given + * offset. */ -static struct btrfs_free_space *tree_search_offset(struct rb_root *root, - u64 offset, u64 bytes, - int fuzzy) +static struct btrfs_free_space * +tree_search_offset(struct btrfs_block_group_cache *block_group, + u64 offset, int bitmap_only, int fuzzy) { - struct rb_node *n = root->rb_node; - struct btrfs_free_space *entry, *ret = NULL; + struct rb_node *n = block_group->free_space_offset.rb_node; + struct btrfs_free_space *entry, *prev = NULL; + + /* find entry that is closest to the 'offset' */ + while (1) { + if (!n) { + entry = NULL; + break; + } - while (n) { entry = rb_entry(n, struct btrfs_free_space, offset_index); + prev = entry; - if (offset < entry->offset) { - if (fuzzy && - (!ret || entry->offset < ret->offset) && - (bytes <= entry->bytes)) - ret = entry; + if (offset < entry->offset) n = n->rb_left; - } else if (offset > entry->offset) { - if (fuzzy && - (entry->offset + entry->bytes - 1) >= offset && - bytes <= entry->bytes) { - ret = entry; - break; - } + else if (offset > entry->offset) n = n->rb_right; - } else { - if (bytes > entry->bytes) { - n = n->rb_right; - continue; - } - ret = entry; + else break; - } } - return ret; -} - -/* - * return a chunk at least bytes size, as close to offset that we can get. - */ -static struct btrfs_free_space *tree_search_bytes(struct rb_root *root, - u64 offset, u64 bytes) -{ - struct rb_node *n = root->rb_node; - struct btrfs_free_space *entry, *ret = NULL; + if (bitmap_only) { + if (!entry) + return NULL; + if (entry->bitmap) + return entry; - while (n) { - entry = rb_entry(n, struct btrfs_free_space, bytes_index); + /* + * bitmap entry and extent entry may share same offset, + * in that case, bitmap entry comes after extent entry. + */ + n = rb_next(n); + if (!n) + return NULL; + entry = rb_entry(n, struct btrfs_free_space, offset_index); + if (entry->offset != offset) + return NULL; - if (bytes < entry->bytes) { + WARN_ON(!entry->bitmap); + return entry; + } else if (entry) { + if (entry->bitmap) { /* - * We prefer to get a hole size as close to the size we - * are asking for so we don't take small slivers out of - * huge holes, but we also want to get as close to the - * offset as possible so we don't have a whole lot of - * fragmentation. 
+ * if previous extent entry covers the offset, + * we should return it instead of the bitmap entry */ - if (offset <= entry->offset) { - if (!ret) - ret = entry; - else if (entry->bytes < ret->bytes) - ret = entry; - else if (entry->offset < ret->offset) - ret = entry; + n = &entry->offset_index; + while (1) { + n = rb_prev(n); + if (!n) + break; + prev = rb_entry(n, struct btrfs_free_space, + offset_index); + if (!prev->bitmap) { + if (prev->offset + prev->bytes > offset) + entry = prev; + break; + } } - n = n->rb_left; - } else if (bytes > entry->bytes) { - n = n->rb_right; + } + return entry; + } + + if (!prev) + return NULL; + + /* find last entry before the 'offset' */ + entry = prev; + if (entry->offset > offset) { + n = rb_prev(&entry->offset_index); + if (n) { + entry = rb_entry(n, struct btrfs_free_space, + offset_index); + BUG_ON(entry->offset > offset); } else { - /* - * Ok we may have multiple chunks of the wanted size, - * so we don't want to take the first one we find, we - * want to take the one closest to our given offset, so - * keep searching just in case theres a better match. - */ - n = n->rb_right; - if (offset > entry->offset) - continue; - else if (!ret || entry->offset < ret->offset) - ret = entry; + if (fuzzy) + return entry; + else + return NULL; } } - return ret; + if (entry->bitmap) { + n = &entry->offset_index; + while (1) { + n = rb_prev(n); + if (!n) + break; + prev = rb_entry(n, struct btrfs_free_space, + offset_index); + if (!prev->bitmap) { + if (prev->offset + prev->bytes > offset) + return prev; + break; + } + } + if (entry->offset + BITS_PER_BITMAP * + block_group->sectorsize > offset) + return entry; + } else if (entry->offset + entry->bytes > offset) + return entry; + + if (!fuzzy) + return NULL; + + while (1) { + if (entry->bitmap) { + if (entry->offset + BITS_PER_BITMAP * + block_group->sectorsize > offset) + break; + } else { + if (entry->offset + entry->bytes > offset) + break; + } + + n = rb_next(&entry->offset_index); + if (!n) + return NULL; + entry = rb_entry(n, struct btrfs_free_space, offset_index); + } + return entry; } static void unlink_free_space(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *info) { rb_erase(&info->offset_index, &block_group->free_space_offset); - rb_erase(&info->bytes_index, &block_group->free_space_bytes); + block_group->free_extents--; } static int link_free_space(struct btrfs_block_group_cache *block_group, @@ -190,17 +245,311 @@ static int link_free_space(struct btrfs_block_group_cache *block_group, { int ret = 0; - - BUG_ON(!info->bytes); + BUG_ON(!info->bitmap && !info->bytes); ret = tree_insert_offset(&block_group->free_space_offset, info->offset, - &info->offset_index); + &info->offset_index, (info->bitmap != NULL)); if (ret) return ret; - ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes, - &info->bytes_index); - if (ret) - return ret; + block_group->free_extents++; + return ret; +} + +static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) +{ + u64 max_bytes, possible_bytes; + + /* + * The goal is to keep the total amount of memory used per 1gb of space + * at or below 32k, so we need to adjust how much memory we allow to be + * used by extent based free space tracking + */ + max_bytes = MAX_CACHE_BYTES_PER_GIG * + (div64_u64(block_group->key.offset, 1024 * 1024 * 1024)); + + possible_bytes = (block_group->total_bitmaps * PAGE_CACHE_SIZE) + + (sizeof(struct btrfs_free_space) * + block_group->extents_thresh); + + if (possible_bytes > max_bytes) 
{ + int extent_bytes = max_bytes - + (block_group->total_bitmaps * PAGE_CACHE_SIZE); + + if (extent_bytes <= 0) { + block_group->extents_thresh = 0; + return; + } + + block_group->extents_thresh = extent_bytes / + (sizeof(struct btrfs_free_space)); + } +} + +static void bitmap_clear_bits(struct btrfs_free_space *info, u64 offset, u64 bytes, + u64 sectorsize) +{ + unsigned long start, end; + unsigned long i; + + start = offset_to_bit(info->offset, sectorsize, offset); + end = start + bytes_to_bits(bytes, sectorsize); + BUG_ON(end > BITS_PER_BITMAP); + + for (i = start; i < end; i++) + clear_bit(i, info->bitmap); + + info->bytes -= bytes; +} + +static void bitmap_set_bits(struct btrfs_free_space *info, u64 offset, u64 bytes, + u64 sectorsize) +{ + unsigned long start, end; + unsigned long i; + + start = offset_to_bit(info->offset, sectorsize, offset); + end = start + bytes_to_bits(bytes, sectorsize); + BUG_ON(end > BITS_PER_BITMAP); + + for (i = start; i < end; i++) + set_bit(i, info->bitmap); + + info->bytes += bytes; +} + +static int search_bitmap(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *bitmap_info, u64 *offset, + u64 *bytes) +{ + unsigned long found_bits = 0; + unsigned long bits, i; + unsigned long next_zero; + + i = offset_to_bit(bitmap_info->offset, block_group->sectorsize, + max_t(u64, *offset, bitmap_info->offset)); + bits = bytes_to_bits(*bytes, block_group->sectorsize); + + for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i); + i < BITS_PER_BITMAP; + i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) { + next_zero = find_next_zero_bit(bitmap_info->bitmap, + BITS_PER_BITMAP, i); + if ((next_zero - i) >= bits) { + found_bits = next_zero - i; + break; + } + i = next_zero; + } + + if (found_bits) { + *offset = (u64)(i * block_group->sectorsize) + + bitmap_info->offset; + *bytes = (u64)(found_bits) * block_group->sectorsize; + return 0; + } + + return -1; +} + +static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache + *block_group, u64 *offset, + u64 *bytes, int debug) +{ + struct btrfs_free_space *entry; + struct rb_node *node; + int ret; + + if (!block_group->free_space_offset.rb_node) + return NULL; + + entry = tree_search_offset(block_group, + offset_to_bitmap(block_group, *offset), + 0, 1); + if (!entry) + return NULL; + + for (node = &entry->offset_index; node; node = rb_next(node)) { + entry = rb_entry(node, struct btrfs_free_space, offset_index); + if (entry->bytes < *bytes) + continue; + + if (entry->bitmap) { + ret = search_bitmap(block_group, entry, offset, bytes); + if (!ret) + return entry; + continue; + } + + *offset = entry->offset; + *bytes = entry->bytes; + return entry; + } + + return NULL; +} + +static void add_new_bitmap(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info, u64 offset) +{ + u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; + int max_bitmaps = (int)div64_u64(block_group->key.offset + + bytes_per_bg - 1, bytes_per_bg); + BUG_ON(block_group->total_bitmaps >= max_bitmaps); + + info->offset = offset_to_bitmap(block_group, offset); + link_free_space(block_group, info); + block_group->total_bitmaps++; + + recalculate_thresholds(block_group); +} + +static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *bitmap_info, + u64 *offset, u64 *bytes) +{ + u64 end; + +again: + end = bitmap_info->offset + + (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1; + + if (*offset > bitmap_info->offset && 
*offset + *bytes > end) { + bitmap_clear_bits(bitmap_info, *offset, + end - *offset + 1, block_group->sectorsize); + *bytes -= end - *offset + 1; + *offset = end + 1; + } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) { + bitmap_clear_bits(bitmap_info, *offset, + *bytes, block_group->sectorsize); + *bytes = 0; + } + + if (*bytes) { + if (!bitmap_info->bytes) { + unlink_free_space(block_group, bitmap_info); + kfree(bitmap_info->bitmap); + kfree(bitmap_info); + block_group->total_bitmaps--; + recalculate_thresholds(block_group); + } + + bitmap_info = tree_search_offset(block_group, + offset_to_bitmap(block_group, + *offset), + 1, 0); + if (!bitmap_info) + return -EINVAL; + + if (!bitmap_info->bitmap) + return -EAGAIN; + + goto again; + } else if (!bitmap_info->bytes) { + unlink_free_space(block_group, bitmap_info); + kfree(bitmap_info->bitmap); + kfree(bitmap_info); + block_group->total_bitmaps--; + recalculate_thresholds(block_group); + } + + return 0; +} + +static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info) +{ + struct btrfs_free_space *bitmap_info; + int added = 0; + u64 bytes, offset, end; + int ret; + + /* + * If we are below the extents threshold then we can add this as an + * extent, and don't have to deal with the bitmap + */ + if (block_group->free_extents < block_group->extents_thresh && + info->bytes > block_group->sectorsize * 4) + return 0; + + /* + * some block groups are so tiny they can't be enveloped by a bitmap, so + * don't even bother to create a bitmap for this + */ + if (BITS_PER_BITMAP * block_group->sectorsize > + block_group->key.offset) + return 0; + + bytes = info->bytes; + offset = info->offset; + +again: + bitmap_info = tree_search_offset(block_group, + offset_to_bitmap(block_group, offset), + 1, 0); + if (!bitmap_info) { + BUG_ON(added); + goto new_bitmap; + } + + end = bitmap_info->offset + + (u64)(BITS_PER_BITMAP * block_group->sectorsize); + + if (offset >= bitmap_info->offset && offset + bytes > end) { + bitmap_set_bits(bitmap_info, offset, end - offset, + block_group->sectorsize); + bytes -= end - offset; + offset = end; + added = 0; + } else if (offset >= bitmap_info->offset && offset + bytes <= end) { + bitmap_set_bits(bitmap_info, offset, bytes, + block_group->sectorsize); + bytes = 0; + } else { + BUG(); + } + + if (!bytes) { + ret = 1; + goto out; + } else + goto again; + +new_bitmap: + if (info && info->bitmap) { + add_new_bitmap(block_group, info, offset); + added = 1; + info = NULL; + goto again; + } else { + spin_unlock(&block_group->tree_lock); + + /* no pre-allocated info, allocate a new one */ + if (!info) { + info = kzalloc(sizeof(struct btrfs_free_space), + GFP_NOFS); + if (!info) { + spin_lock(&block_group->tree_lock); + ret = -ENOMEM; + goto out; + } + } + + /* allocate the bitmap */ + info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); + spin_lock(&block_group->tree_lock); + if (!info->bitmap) { + ret = -ENOMEM; + goto out; + } + goto again; + } + +out: + if (info) { + if (info->bitmap) + kfree(info->bitmap); + kfree(info); + } return ret; } @@ -208,8 +557,8 @@ static int link_free_space(struct btrfs_block_group_cache *block_group, int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes) { - struct btrfs_free_space *right_info; - struct btrfs_free_space *left_info; + struct btrfs_free_space *right_info = NULL; + struct btrfs_free_space *left_info = NULL; struct btrfs_free_space *info = NULL; int ret = 0; @@ -227,18 +576,38 @@ int 
btrfs_add_free_space(struct btrfs_block_group_cache *block_group, * are adding, if there is remove that struct and add a new one to * cover the entire range */ - right_info = tree_search_offset(&block_group->free_space_offset, - offset+bytes, 0, 0); - left_info = tree_search_offset(&block_group->free_space_offset, - offset-1, 0, 1); + right_info = tree_search_offset(block_group, offset + bytes, 0, 0); + if (right_info && rb_prev(&right_info->offset_index)) + left_info = rb_entry(rb_prev(&right_info->offset_index), + struct btrfs_free_space, offset_index); + else + left_info = tree_search_offset(block_group, offset - 1, 0, 0); - if (right_info) { + /* + * If there was no extent directly to the left or right of this new + * extent then we know we're going to have to allocate a new extent, so + * before we do that see if we need to drop this into a bitmap + */ + if ((!left_info || left_info->bitmap) && + (!right_info || right_info->bitmap)) { + ret = insert_into_bitmap(block_group, info); + + if (ret < 0) { + goto out; + } else if (ret) { + ret = 0; + goto out; + } + } + + if (right_info && !right_info->bitmap) { unlink_free_space(block_group, right_info); info->bytes += right_info->bytes; kfree(right_info); } - if (left_info && left_info->offset + left_info->bytes == offset) { + if (left_info && !left_info->bitmap && + left_info->offset + left_info->bytes == offset) { unlink_free_space(block_group, left_info); info->offset = left_info->offset; info->bytes += left_info->bytes; @@ -248,11 +617,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, ret = link_free_space(block_group, info); if (ret) kfree(info); - +out: spin_unlock(&block_group->tree_lock); if (ret) { - printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret); + printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret); BUG_ON(ret == -EEXIST); } @@ -263,40 +632,65 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes) { struct btrfs_free_space *info; + struct btrfs_free_space *next_info = NULL; int ret = 0; spin_lock(&block_group->tree_lock); - info = tree_search_offset(&block_group->free_space_offset, offset, 0, - 1); - if (info && info->offset == offset) { - if (info->bytes < bytes) { - printk(KERN_ERR "Found free space at %llu, size %llu," - "trying to use %llu\n", - (unsigned long long)info->offset, - (unsigned long long)info->bytes, - (unsigned long long)bytes); +again: + info = tree_search_offset(block_group, offset, 0, 0); + if (!info) { + WARN_ON(1); + goto out_lock; + } + + if (info->bytes < bytes && rb_next(&info->offset_index)) { + u64 end; + next_info = rb_entry(rb_next(&info->offset_index), + struct btrfs_free_space, + offset_index); + + if (next_info->bitmap) + end = next_info->offset + BITS_PER_BITMAP * + block_group->sectorsize - 1; + else + end = next_info->offset + next_info->bytes; + + if (next_info->bytes < bytes || + next_info->offset > offset || offset > end) { + printk(KERN_CRIT "Found free space at %llu, size %llu," + " trying to use %llu\n", + (unsigned long long)info->offset, + (unsigned long long)info->bytes, + (unsigned long long)bytes); WARN_ON(1); ret = -EINVAL; - spin_unlock(&block_group->tree_lock); - goto out; + goto out_lock; } - unlink_free_space(block_group, info); - if (info->bytes == bytes) { - kfree(info); - spin_unlock(&block_group->tree_lock); - goto out; + info = next_info; + } + + if (info->bytes == bytes) { + unlink_free_space(block_group, info); + if (info->bitmap) { + kfree(info->bitmap); + 
block_group->total_bitmaps--; } + kfree(info); + goto out_lock; + } + if (!info->bitmap && info->offset == offset) { + unlink_free_space(block_group, info); info->offset += bytes; info->bytes -= bytes; + link_free_space(block_group, info); + goto out_lock; + } - ret = link_free_space(block_group, info); - spin_unlock(&block_group->tree_lock); - BUG_ON(ret); - } else if (info && info->offset < offset && - info->offset + info->bytes >= offset + bytes) { + if (!info->bitmap && info->offset <= offset && + info->offset + info->bytes >= offset + bytes) { u64 old_start = info->offset; /* * we're freeing space in the middle of the info, @@ -312,7 +706,9 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, info->offset = offset + bytes; info->bytes = old_end - info->offset; ret = link_free_space(block_group, info); - BUG_ON(ret); + WARN_ON(ret); + if (ret) + goto out_lock; } else { /* the hole we're creating ends at the end * of the info struct, just free the info @@ -320,32 +716,22 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, kfree(info); } spin_unlock(&block_group->tree_lock); - /* step two, insert a new info struct to cover anything - * before the hole + + /* step two, insert a new info struct to cover + * anything before the hole */ ret = btrfs_add_free_space(block_group, old_start, offset - old_start); - BUG_ON(ret); - } else { - spin_unlock(&block_group->tree_lock); - if (!info) { - printk(KERN_ERR "couldn't find space %llu to free\n", - (unsigned long long)offset); - printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n", - block_group->cached, - (unsigned long long)block_group->key.objectid, - (unsigned long long)block_group->key.offset); - btrfs_dump_free_space(block_group, bytes); - } else if (info) { - printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, " - "but wanted offset=%llu bytes=%llu\n", - (unsigned long long)info->offset, - (unsigned long long)info->bytes, - (unsigned long long)offset, - (unsigned long long)bytes); - } - WARN_ON(1); + WARN_ON(ret); + goto out; } + + ret = remove_from_bitmap(block_group, info, &offset, &bytes); + if (ret == -EAGAIN) + goto again; + BUG_ON(ret); +out_lock: + spin_unlock(&block_group->tree_lock); out: return ret; } @@ -361,10 +747,13 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, info = rb_entry(n, struct btrfs_free_space, offset_index); if (info->bytes >= bytes) count++; - printk(KERN_ERR "entry offset %llu, bytes %llu\n", + printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n", (unsigned long long)info->offset, - (unsigned long long)info->bytes); + (unsigned long long)info->bytes, + (info->bitmap) ? "yes" : "no"); } + printk(KERN_INFO "block group has cluster?: %s\n", + list_empty(&block_group->cluster_list) ? 
"no" : "yes"); printk(KERN_INFO "%d blocks of free space at or bigger than bytes is" "\n", count); } @@ -397,26 +786,35 @@ __btrfs_return_cluster_to_free_space( { struct btrfs_free_space *entry; struct rb_node *node; + bool bitmap; spin_lock(&cluster->lock); if (cluster->block_group != block_group) goto out; + bitmap = cluster->points_to_bitmap; + cluster->block_group = NULL; cluster->window_start = 0; + list_del_init(&cluster->block_group_list); + cluster->points_to_bitmap = false; + + if (bitmap) + goto out; + node = rb_first(&cluster->root); - while(node) { + while (node) { entry = rb_entry(node, struct btrfs_free_space, offset_index); node = rb_next(&entry->offset_index); rb_erase(&entry->offset_index, &cluster->root); - link_free_space(block_group, entry); + BUG_ON(entry->bitmap); + tree_insert_offset(&block_group->free_space_offset, + entry->offset, &entry->offset_index, 0); } - list_del_init(&cluster->block_group_list); - - btrfs_put_block_group(cluster->block_group); - cluster->block_group = NULL; cluster->root.rb_node = NULL; + out: spin_unlock(&cluster->lock); + btrfs_put_block_group(block_group); return 0; } @@ -425,20 +823,28 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) struct btrfs_free_space *info; struct rb_node *node; struct btrfs_free_cluster *cluster; - struct btrfs_free_cluster *safe; + struct list_head *head; spin_lock(&block_group->tree_lock); - - list_for_each_entry_safe(cluster, safe, &block_group->cluster_list, - block_group_list) { + while ((head = block_group->cluster_list.next) != + &block_group->cluster_list) { + cluster = list_entry(head, struct btrfs_free_cluster, + block_group_list); WARN_ON(cluster->block_group != block_group); __btrfs_return_cluster_to_free_space(block_group, cluster); + if (need_resched()) { + spin_unlock(&block_group->tree_lock); + cond_resched(); + spin_lock(&block_group->tree_lock); + } } - while ((node = rb_last(&block_group->free_space_bytes)) != NULL) { - info = rb_entry(node, struct btrfs_free_space, bytes_index); + while ((node = rb_last(&block_group->free_space_offset)) != NULL) { + info = rb_entry(node, struct btrfs_free_space, offset_index); unlink_free_space(block_group, info); + if (info->bitmap) + kfree(info->bitmap); kfree(info); if (need_resched()) { spin_unlock(&block_group->tree_lock); @@ -446,6 +852,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) spin_lock(&block_group->tree_lock); } } + spin_unlock(&block_group->tree_lock); } @@ -453,27 +860,37 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes, u64 empty_size) { struct btrfs_free_space *entry = NULL; + u64 bytes_search = bytes + empty_size; u64 ret = 0; spin_lock(&block_group->tree_lock); - entry = tree_search_offset(&block_group->free_space_offset, offset, - bytes + empty_size, 1); + entry = find_free_space(block_group, &offset, &bytes_search, 0); if (!entry) - entry = tree_search_bytes(&block_group->free_space_bytes, - offset, bytes + empty_size); - if (entry) { + goto out; + + ret = offset; + if (entry->bitmap) { + bitmap_clear_bits(entry, offset, bytes, + block_group->sectorsize); + if (!entry->bytes) { + unlink_free_space(block_group, entry); + kfree(entry->bitmap); + kfree(entry); + block_group->total_bitmaps--; + recalculate_thresholds(block_group); + } + } else { unlink_free_space(block_group, entry); - ret = entry->offset; entry->offset += bytes; entry->bytes -= bytes; - if (!entry->bytes) kfree(entry); else link_free_space(block_group, 
entry); } - spin_unlock(&block_group->tree_lock); +out: + spin_unlock(&block_group->tree_lock); return ret; } @@ -517,6 +934,47 @@ int btrfs_return_cluster_to_free_space( return ret; } +static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, + struct btrfs_free_cluster *cluster, + u64 bytes, u64 min_start) +{ + struct btrfs_free_space *entry; + int err; + u64 search_start = cluster->window_start; + u64 search_bytes = bytes; + u64 ret = 0; + + spin_lock(&block_group->tree_lock); + spin_lock(&cluster->lock); + + if (!cluster->points_to_bitmap) + goto out; + + if (cluster->block_group != block_group) + goto out; + + entry = tree_search_offset(block_group, search_start, 0, 0); + + if (!entry || !entry->bitmap) + goto out; + + search_start = min_start; + search_bytes = bytes; + + err = search_bitmap(block_group, entry, &search_start, + &search_bytes); + if (err) + goto out; + + ret = search_start; + bitmap_clear_bits(entry, ret, bytes, block_group->sectorsize); +out: + spin_unlock(&cluster->lock); + spin_unlock(&block_group->tree_lock); + + return ret; +} + /* * given a cluster, try to allocate 'bytes' from it, returns 0 * if it couldn't find anything suitably large, or a logical disk offset @@ -530,6 +988,10 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, struct rb_node *node; u64 ret = 0; + if (cluster->points_to_bitmap) + return btrfs_alloc_from_bitmap(block_group, cluster, bytes, + min_start); + spin_lock(&cluster->lock); if (bytes > cluster->max_size) goto out; @@ -567,9 +1029,73 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, } out: spin_unlock(&cluster->lock); + return ret; } +static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *entry, + struct btrfs_free_cluster *cluster, + u64 offset, u64 bytes, u64 min_bytes) +{ + unsigned long next_zero; + unsigned long i; + unsigned long search_bits; + unsigned long total_bits; + unsigned long found_bits; + unsigned long start = 0; + unsigned long total_found = 0; + bool found = false; + + i = offset_to_bit(entry->offset, block_group->sectorsize, + max_t(u64, offset, entry->offset)); + search_bits = bytes_to_bits(min_bytes, block_group->sectorsize); + total_bits = bytes_to_bits(bytes, block_group->sectorsize); + +again: + found_bits = 0; + for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i); + i < BITS_PER_BITMAP; + i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) { + next_zero = find_next_zero_bit(entry->bitmap, + BITS_PER_BITMAP, i); + if (next_zero - i >= search_bits) { + found_bits = next_zero - i; + break; + } + i = next_zero; + } + + if (!found_bits) + return -1; + + if (!found) { + start = i; + found = true; + } + + total_found += found_bits; + + if (cluster->max_size < found_bits * block_group->sectorsize) + cluster->max_size = found_bits * block_group->sectorsize; + + if (total_found < total_bits) { + i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero); + if (i - start > total_bits * 2) { + total_found = 0; + cluster->max_size = 0; + found = false; + } + goto again; + } + + cluster->window_start = start * block_group->sectorsize + + entry->offset; + cluster->points_to_bitmap = true; + + return 0; +} + /* * here we try to find a cluster of blocks in a block group. The goal * is to find at least bytes free and up to empty_size + bytes free. 
@@ -587,12 +1113,12 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, struct btrfs_free_space *entry = NULL; struct rb_node *node; struct btrfs_free_space *next; - struct btrfs_free_space *last; + struct btrfs_free_space *last = NULL; u64 min_bytes; u64 window_start; u64 window_free; u64 max_extent = 0; - int total_retries = 0; + bool found_bitmap = false; int ret; /* for metadata, allow allocates with more holes */ @@ -620,30 +1146,79 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, goto out; } again: - min_bytes = min(min_bytes, bytes + empty_size); - entry = tree_search_bytes(&block_group->free_space_bytes, - offset, min_bytes); + entry = tree_search_offset(block_group, offset, found_bitmap, 1); if (!entry) { ret = -ENOSPC; goto out; } + + /* + * If found_bitmap is true, we exhausted our search for extent entries, + * and we just want to search all of the bitmaps that we can find, and + * ignore any extent entries we find. + */ + while (entry->bitmap || found_bitmap || + (!entry->bitmap && entry->bytes < min_bytes)) { + struct rb_node *node = rb_next(&entry->offset_index); + + if (entry->bitmap && entry->bytes > bytes + empty_size) { + ret = btrfs_bitmap_cluster(block_group, entry, cluster, + offset, bytes + empty_size, + min_bytes); + if (!ret) + goto got_it; + } + + if (!node) { + ret = -ENOSPC; + goto out; + } + entry = rb_entry(node, struct btrfs_free_space, offset_index); + } + + /* + * We already searched all the extent entries from the passed in offset + * to the end and didn't find enough space for the cluster, and we also + * didn't find any bitmaps that met our criteria, just go ahead and exit + */ + if (found_bitmap) { + ret = -ENOSPC; + goto out; + } + + cluster->points_to_bitmap = false; window_start = entry->offset; window_free = entry->bytes; last = entry; max_extent = entry->bytes; - while(1) { + while (1) { /* out window is just right, lets fill it */ if (window_free >= bytes + empty_size) break; node = rb_next(&last->offset_index); if (!node) { + if (found_bitmap) + goto again; ret = -ENOSPC; goto out; } next = rb_entry(node, struct btrfs_free_space, offset_index); + /* + * we found a bitmap, so if this search doesn't result in a + * cluster, we know to go and search again for the bitmaps and + * start looking for space there + */ + if (next->bitmap) { + if (!found_bitmap) + offset = next->offset; + found_bitmap = true; + last = next; + continue; + } + /* * we haven't filled the empty size and the window is * very large. reset and try again @@ -655,19 +1230,6 @@ again: window_free = entry->bytes; last = entry; max_extent = 0; - total_retries++; - if (total_retries % 64 == 0) { - if (min_bytes >= (bytes + empty_size)) { - ret = -ENOSPC; - goto out; - } - /* - * grow our allocation a bit, we're not having - * much luck - */ - min_bytes *= 2; - goto again; - } } else { last = next; window_free += next->bytes; @@ -685,11 +1247,19 @@ again: * The cluster includes an rbtree, but only uses the offset index * of each free space cache entry. 
*/ - while(1) { node = rb_next(&entry->offset_index); - unlink_free_space(block_group, entry); + if (entry->bitmap && node) { + entry = rb_entry(node, struct btrfs_free_space, + offset_index); + continue; + } else if (entry->bitmap && !node) { + break; + } + + rb_erase(&entry->offset_index, &block_group->free_space_offset); ret = tree_insert_offset(&cluster->root, entry->offset, - &entry->offset_index); + &entry->offset_index, 0); BUG_ON(ret); if (!node || entry == last) @@ -697,8 +1267,10 @@ again: entry = rb_entry(node, struct btrfs_free_space, offset_index); } - ret = 0; + cluster->max_size = max_extent; +got_it: + ret = 0; atomic_inc(&block_group->count); list_add_tail(&cluster->block_group_list, &block_group->cluster_list); cluster->block_group = block_group; @@ -718,6 +1290,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) spin_lock_init(&cluster->refill_lock); cluster->root.rb_node = NULL; cluster->max_size = 0; + cluster->points_to_bitmap = false; INIT_LIST_HEAD(&cluster->block_group_list); cluster->block_group = NULL; } diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 266fb876405..890a8e79011 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -19,6 +19,14 @@ #ifndef __BTRFS_FREE_SPACE_CACHE #define __BTRFS_FREE_SPACE_CACHE +struct btrfs_free_space { + struct rb_node offset_index; + u64 offset; + u64 bytes; + unsigned long *bitmap; + struct list_head list; +}; + int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, u64 bytenr, u64 size); int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, -- cgit v1.2.3 From 817d52f8dba26d0295c26035531c30ce5f1e3c3e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 13 Jul 2009 21:29:25 -0400 Subject: Btrfs: async block group caching This patch moves the caching of the block group off to a kthread in order to allow people to allocate sooner. Instead of blocking up behind the caching mutex, we instead kick off the caching kthread, and then attempt to make an allocation. If we cannot, we wait on the block group's caching waitqueue; the caching kthread wakes the waiting threads every time it finds 2 megs worth of space, and then again when it's finished caching. This is how I tested the speedup from this:

mkfs the disk
mount the disk
fill the disk up with fs_mark
unmount the disk
mount the disk
time touch /mnt/foo

Without my changes this took 11 seconds on my box, with these changes it now takes 1 second. Another change that's been put in place is we lock the super mirrors in the pinned extent map in order to keep us from adding that stuff as free space when caching the block group. This doesn't really change anything else as far as the pinned extent map is concerned, since for actual pinned extents we use EXTENT_DIRTY, but it does mean that when we unmount we have to go in and unlock those extents to keep from leaking memory. I've also added a check where, when we are reading block groups from disk, if the amount of space used == the size of the block group, we go ahead and mark the block group as cached. This drastically reduces the amount of time it takes to cache the block groups. Using the same test as above, except doing a dd to a file and then unmounting, it used to take 33 seconds to umount; now it takes 3 seconds.
This version uses the commit_root in the caching kthread, and then keeps track of how many async caching threads are running at any given time so that, if one of the async threads is still running as we cross transactions, we can wait until it's finished before handling the pinned extents. Thank you, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 22 ++- fs/btrfs/disk-io.c | 3 + fs/btrfs/extent-tree.c | 471 +++++++++++++++++++++++++++++++++++--------- fs/btrfs/free-space-cache.c | 42 ++-- fs/btrfs/transaction.c | 23 ++- fs/btrfs/tree-log.c | 2 +- 6 files changed, 439 insertions(+), 124 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0cbf3491bb7..42b03c4ee49 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -691,6 +691,7 @@ struct btrfs_space_info { struct list_head block_groups; spinlock_t lock; struct rw_semaphore groups_sem; + atomic_t caching_threads; }; /* @@ -721,11 +722,17 @@ struct btrfs_free_cluster { struct list_head block_group_list; }; +enum btrfs_caching_type { + BTRFS_CACHE_NO = 0, + BTRFS_CACHE_STARTED = 1, + BTRFS_CACHE_FINISHED = 2, +}; + struct btrfs_block_group_cache { struct btrfs_key key; struct btrfs_block_group_item item; + struct btrfs_fs_info *fs_info; spinlock_t lock; - struct mutex cache_mutex; u64 pinned; u64 reserved; u64 flags; @@ -733,15 +740,19 @@ struct btrfs_block_group_cache { int extents_thresh; int free_extents; int total_bitmaps; - int cached; int ro; int dirty; + /* cache tracking stuff */ + wait_queue_head_t caching_q; + int cached; + struct btrfs_space_info *space_info; /* free space cache stuff */ spinlock_t tree_lock; struct rb_root free_space_offset; + u64 free_space; /* block group cache stuff */ struct rb_node cache_node; @@ -834,6 +845,7 @@ struct btrfs_fs_info { atomic_t async_submit_draining; atomic_t nr_async_bios; atomic_t async_delalloc_pages; + atomic_t async_caching_threads; /* * this is used by the balancing code to wait for all the pending @@ -950,6 +962,9 @@ struct btrfs_root { /* the node lock is held while changing the node pointer */ spinlock_t node_lock; + /* taken when updating the commit root */ + struct rw_semaphore commit_root_sem; + struct extent_buffer *commit_root; struct btrfs_root *log_root; struct btrfs_root *reloc_root; @@ -1911,7 +1926,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_root *root, unsigned long count); int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len); int btrfs_update_pinned_extents(struct btrfs_root *root, - u64 bytenr, u64 num, int pin); + u64 bytenr, u64 num, int pin, int mark_free); int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *leaf); int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, @@ -1996,6 +2011,7 @@ void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode, u64 bytes); void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode, u64 bytes); +void btrfs_free_super_mirror_extents(struct btrfs_fs_info *info); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, int level, int *slot); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 55d9d188e69..ec2c915f7f4 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -907,6 +907,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, spin_lock_init(&root->inode_lock); mutex_init(&root->objectid_mutex); mutex_init(&root->log_mutex); + init_rwsem(&root->commit_root_sem);
init_waitqueue_head(&root->log_writer_wait); init_waitqueue_head(&root->log_commit_wait[0]); init_waitqueue_head(&root->log_commit_wait[1]); @@ -1566,6 +1567,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, atomic_set(&fs_info->async_delalloc_pages, 0); atomic_set(&fs_info->async_submit_draining, 0); atomic_set(&fs_info->nr_async_bios, 0); + atomic_set(&fs_info->async_caching_threads, 0); fs_info->sb = sb; fs_info->max_extent = (u64)-1; fs_info->max_inline = 8192 * 1024; @@ -2337,6 +2339,7 @@ int close_ctree(struct btrfs_root *root) free_extent_buffer(root->fs_info->csum_root->commit_root); btrfs_free_block_groups(root->fs_info); + btrfs_free_super_mirror_extents(root->fs_info); del_fs_roots(fs_info); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 98697be6bdd..9a489cc89fd 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "compat.h" #include "hash.h" #include "ctree.h" @@ -61,6 +62,13 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root, u64 alloc_bytes, u64 flags, int force); +static noinline int +block_group_cache_done(struct btrfs_block_group_cache *cache) +{ + smp_mb(); + return cache->cached == BTRFS_CACHE_FINISHED; +} + static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) { return (cache->flags & bits) == bits; @@ -145,21 +153,64 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr, return ret; } +void btrfs_free_super_mirror_extents(struct btrfs_fs_info *info) +{ + u64 start, end, last = 0; + int ret; + + while (1) { + ret = find_first_extent_bit(&info->pinned_extents, last, + &start, &end, EXTENT_LOCKED); + if (ret) + break; + + unlock_extent(&info->pinned_extents, start, end, GFP_NOFS); + last = end+1; + } +} + +static int remove_sb_from_cache(struct btrfs_root *root, + struct btrfs_block_group_cache *cache) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + u64 bytenr; + u64 *logical; + int stripe_len; + int i, nr, ret; + + for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { + bytenr = btrfs_sb_offset(i); + ret = btrfs_rmap_block(&root->fs_info->mapping_tree, + cache->key.objectid, bytenr, + 0, &logical, &nr, &stripe_len); + BUG_ON(ret); + while (nr--) { + try_lock_extent(&fs_info->pinned_extents, + logical[nr], + logical[nr] + stripe_len - 1, GFP_NOFS); + } + kfree(logical); + } + + return 0; +} + /* * this is only called by cache_block_group, since we could have freed extents * we need to check the pinned_extents for any extents that can't be used yet * since their free space will be released as soon as the transaction commits. 
*/ -static int add_new_free_space(struct btrfs_block_group_cache *block_group, +static u64 add_new_free_space(struct btrfs_block_group_cache *block_group, struct btrfs_fs_info *info, u64 start, u64 end) { - u64 extent_start, extent_end, size; + u64 extent_start, extent_end, size, total_added = 0; int ret; while (start < end) { ret = find_first_extent_bit(&info->pinned_extents, start, &extent_start, &extent_end, - EXTENT_DIRTY); + EXTENT_DIRTY|EXTENT_LOCKED| + EXTENT_DELALLOC); if (ret) break; @@ -167,6 +218,7 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group, start = extent_end + 1; } else if (extent_start > start && extent_start < end) { size = extent_start - start; + total_added += size; ret = btrfs_add_free_space(block_group, start, size); BUG_ON(ret); @@ -178,84 +230,139 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group, if (start < end) { size = end - start; + total_added += size; ret = btrfs_add_free_space(block_group, start, size); BUG_ON(ret); } - return 0; + return total_added; } -static int remove_sb_from_cache(struct btrfs_root *root, - struct btrfs_block_group_cache *cache) +DEFINE_MUTEX(discard_mutex); + +/* + * if async kthreads are running when we cross transactions, we mark any pinned + * extents with EXTENT_DELALLOC and then let the caching kthreads clean up those + * extents when they are done. Also we run this from btrfs_finish_extent_commit + * in case there were some pinned extents that were missed because we had + * already cached that block group. + */ +static void btrfs_discard_pinned_extents(struct btrfs_fs_info *fs_info, + struct btrfs_block_group_cache *cache) { - u64 bytenr; - u64 *logical; - int stripe_len; - int i, nr, ret; + u64 start, end, last; + int ret; - for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { - bytenr = btrfs_sb_offset(i); - ret = btrfs_rmap_block(&root->fs_info->mapping_tree, - cache->key.objectid, bytenr, 0, - &logical, &nr, &stripe_len); - BUG_ON(ret); - while (nr--) { - btrfs_remove_free_space(cache, logical[nr], - stripe_len); + if (!cache) + last = 0; + else + last = cache->key.objectid; + + mutex_lock(&discard_mutex); + while (1) { + ret = find_first_extent_bit(&fs_info->pinned_extents, last, + &start, &end, EXTENT_DELALLOC); + if (ret) + break; + + if (cache && start >= cache->key.objectid + cache->key.offset) + break; + + + if (!cache) { + cache = btrfs_lookup_block_group(fs_info, start); + BUG_ON(!cache); + + start = max(start, cache->key.objectid); + end = min(end, cache->key.objectid + cache->key.offset - 1); + + if (block_group_cache_done(cache)) + btrfs_add_free_space(cache, start, + end - start + 1); + cache = NULL; + } else { + start = max(start, cache->key.objectid); + end = min(end, cache->key.objectid + cache->key.offset - 1); + btrfs_add_free_space(cache, start, end - start + 1); + } + + clear_extent_bits(&fs_info->pinned_extents, start, end, + EXTENT_DELALLOC, GFP_NOFS); + last = end + 1; + + if (need_resched()) { + mutex_unlock(&discard_mutex); + cond_resched(); + mutex_lock(&discard_mutex); } - kfree(logical); } - return 0; + mutex_unlock(&discard_mutex); } -static int cache_block_group(struct btrfs_root *root, - struct btrfs_block_group_cache *block_group) +static int caching_kthread(void *data) { + struct btrfs_block_group_cache *block_group = data; + struct btrfs_fs_info *fs_info = block_group->fs_info; + u64 last = 0; struct btrfs_path *path; int ret = 0; struct btrfs_key key; struct extent_buffer *leaf; int slot; - u64 last; - - if (!block_group) - return 0; + u64 
total_found = 0; - root = root->fs_info->extent_root; - - if (block_group->cached) - return 0; + BUG_ON(!fs_info); path = btrfs_alloc_path(); if (!path) return -ENOMEM; - path->reada = 2; + atomic_inc(&fs_info->async_caching_threads); + atomic_inc(&block_group->space_info->caching_threads); + last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); +again: + /* need to make sure the commit_root doesn't disappear */ + down_read(&fs_info->extent_root->commit_root_sem); + /* - * we get into deadlocks with paths held by callers of this function. - * since the alloc_mutex is protecting things right now, just - * skip the locking here + * We don't want to deadlock with somebody trying to allocate a new + * extent for the extent root while also trying to search the extent + * root to add free space. So we skip locking and search the commit + * root, since its read-only */ path->skip_locking = 1; - last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); + path->search_commit_root = 1; + path->reada = 2; + key.objectid = last; key.offset = 0; btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); - ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0); if (ret < 0) goto err; while (1) { + smp_mb(); + if (block_group->fs_info->closing) + break; + leaf = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(leaf)) { - ret = btrfs_next_leaf(root, path); + ret = btrfs_next_leaf(fs_info->extent_root, path); if (ret < 0) goto err; - if (ret == 0) - continue; - else + else if (ret) break; + + if (need_resched()) { + btrfs_release_path(fs_info->extent_root, path); + up_read(&fs_info->extent_root->commit_root_sem); + cond_resched(); + goto again; + } + + continue; } btrfs_item_key_to_cpu(leaf, &key, slot); if (key.objectid < block_group->key.objectid) @@ -266,24 +373,63 @@ static int cache_block_group(struct btrfs_root *root, break; if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) { - add_new_free_space(block_group, root->fs_info, last, - key.objectid); - + total_found += add_new_free_space(block_group, + fs_info, last, + key.objectid); last = key.objectid + key.offset; } + + if (total_found > (1024 * 1024 * 2)) { + total_found = 0; + wake_up(&block_group->caching_q); + } next: path->slots[0]++; } + ret = 0; - add_new_free_space(block_group, root->fs_info, last, - block_group->key.objectid + - block_group->key.offset); + total_found += add_new_free_space(block_group, fs_info, last, + block_group->key.objectid + + block_group->key.offset); + + spin_lock(&block_group->lock); + block_group->cached = BTRFS_CACHE_FINISHED; + spin_unlock(&block_group->lock); - block_group->cached = 1; - remove_sb_from_cache(root, block_group); - ret = 0; err: btrfs_free_path(path); + up_read(&fs_info->extent_root->commit_root_sem); + atomic_dec(&fs_info->async_caching_threads); + atomic_dec(&block_group->space_info->caching_threads); + wake_up(&block_group->caching_q); + + if (!ret) + btrfs_discard_pinned_extents(fs_info, block_group); + + return 0; +} + +static int cache_block_group(struct btrfs_block_group_cache *cache) +{ + struct task_struct *tsk; + int ret = 0; + + spin_lock(&cache->lock); + if (cache->cached != BTRFS_CACHE_NO) { + spin_unlock(&cache->lock); + return ret; + } + cache->cached = BTRFS_CACHE_STARTED; + spin_unlock(&cache->lock); + + tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n", + cache->key.objectid); + if (IS_ERR(tsk)) { + ret = PTR_ERR(tsk); + printk(KERN_ERR "error running 
thread %d\n", ret); + BUG(); + } + return ret; } @@ -1721,7 +1867,7 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans, BUG_ON(ret); } btrfs_update_pinned_extents(root, node->bytenr, - node->num_bytes, 1); + node->num_bytes, 1, 0); update_reserved_extents(root, node->bytenr, node->num_bytes, 0); } @@ -2496,6 +2642,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, found->force_alloc = 0; *space_info = found; list_add_rcu(&found->list, &info->space_info); + atomic_set(&found->caching_threads, 0); return 0; } @@ -2953,7 +3100,7 @@ static u64 first_logical_byte(struct btrfs_root *root, u64 search_start) } int btrfs_update_pinned_extents(struct btrfs_root *root, - u64 bytenr, u64 num, int pin) + u64 bytenr, u64 num, int pin, int mark_free) { u64 len; struct btrfs_block_group_cache *cache; @@ -2988,7 +3135,7 @@ int btrfs_update_pinned_extents(struct btrfs_root *root, spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); fs_info->total_pinned -= len; - if (cache->cached) + if (block_group_cache_done(cache) && mark_free) btrfs_add_free_space(cache, bytenr, len); } btrfs_put_block_group(cache); @@ -3034,14 +3181,27 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy) u64 last = 0; u64 start; u64 end; + bool caching_kthreads = false; struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents; int ret; + if (atomic_read(&root->fs_info->async_caching_threads)) + caching_kthreads = true; + while (1) { ret = find_first_extent_bit(pinned_extents, last, &start, &end, EXTENT_DIRTY); if (ret) break; + + /* + * we need to make sure that the pinned extents don't go away + * while we are caching block groups + */ + if (unlikely(caching_kthreads)) + set_extent_delalloc(pinned_extents, start, end, + GFP_NOFS); + set_extent_dirty(copy, start, end, GFP_NOFS); last = end + 1; } @@ -3055,6 +3215,12 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, u64 start; u64 end; int ret; + int mark_free = 1; + + ret = find_first_extent_bit(&root->fs_info->pinned_extents, 0, + &start, &end, EXTENT_DELALLOC); + if (!ret) + mark_free = 0; while (1) { ret = find_first_extent_bit(unpin, 0, &start, &end, @@ -3065,11 +3231,16 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, ret = btrfs_discard_extent(root, start, end + 1 - start); /* unlocks the pinned mutex */ - btrfs_update_pinned_extents(root, start, end + 1 - start, 0); + btrfs_update_pinned_extents(root, start, end + 1 - start, 0, + mark_free); clear_extent_dirty(unpin, start, end, GFP_NOFS); cond_resched(); } + + if (unlikely(!mark_free)) + btrfs_discard_pinned_extents(root->fs_info, NULL); + return ret; } @@ -3110,7 +3281,7 @@ static int pin_down_bytes(struct btrfs_trans_handle *trans, pinit: btrfs_set_path_blocking(path); /* unlocks the pinned mutex */ - btrfs_update_pinned_extents(root, bytenr, num_bytes, 1); + btrfs_update_pinned_extents(root, bytenr, num_bytes, 1, 0); BUG_ON(err < 0); return 0; @@ -3421,7 +3592,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, if (root_objectid == BTRFS_TREE_LOG_OBJECTID) { WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID); /* unlocks the pinned mutex */ - btrfs_update_pinned_extents(root, bytenr, num_bytes, 1); + btrfs_update_pinned_extents(root, bytenr, num_bytes, 1, 0); update_reserved_extents(root, bytenr, num_bytes, 0); ret = 0; } else if (owner < BTRFS_FIRST_FREE_OBJECTID) { @@ -3447,6 +3618,45 @@ static u64 stripe_align(struct btrfs_root *root, u64 val) return ret; } +/* + * when we wait for progress in 
the block group caching, its because + * our allocation attempt failed at least once. So, we must sleep + * and let some progress happen before we try again. + * + * This function will sleep at least once waiting for new free space to + * show up, and then it will check the block group free space numbers + * for our min num_bytes. Another option is to have it go ahead + * and look in the rbtree for a free extent of a given size, but this + * is a good start. + */ +static noinline int +wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, + u64 num_bytes) +{ + DEFINE_WAIT(wait); + + prepare_to_wait(&cache->caching_q, &wait, TASK_UNINTERRUPTIBLE); + + if (block_group_cache_done(cache)) { + finish_wait(&cache->caching_q, &wait); + return 0; + } + schedule(); + finish_wait(&cache->caching_q, &wait); + + wait_event(cache->caching_q, block_group_cache_done(cache) || + (cache->free_space >= num_bytes)); + return 0; +} + +enum btrfs_loop_type { + LOOP_CACHED_ONLY = 0, + LOOP_CACHING_NOWAIT = 1, + LOOP_CACHING_WAIT = 2, + LOOP_ALLOC_CHUNK = 3, + LOOP_NO_EMPTY_SIZE = 4, +}; + /* * walks the btree of allocated extents and find a hole of a given size. * The key ins is changed to record the hole: @@ -3472,6 +3682,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_space_info *space_info; int last_ptr_loop = 0; int loop = 0; + bool found_uncached_bg = false; WARN_ON(num_bytes < root->sectorsize); btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY); @@ -3503,15 +3714,18 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, search_start = max(search_start, first_logical_byte(root, 0)); search_start = max(search_start, hint_byte); - if (!last_ptr) { + if (!last_ptr) empty_cluster = 0; - loop = 1; - } if (search_start == hint_byte) { block_group = btrfs_lookup_block_group(root->fs_info, search_start); - if (block_group && block_group_bits(block_group, data)) { + /* + * we don't want to use the block group if it doesn't match our + * allocation bits, or if its not cached. + */ + if (block_group && block_group_bits(block_group, data) && + block_group_cache_done(block_group)) { down_read(&space_info->groups_sem); if (list_empty(&block_group->list) || block_group->ro) { @@ -3534,21 +3748,35 @@ search: down_read(&space_info->groups_sem); list_for_each_entry(block_group, &space_info->block_groups, list) { u64 offset; + int cached; atomic_inc(&block_group->count); search_start = block_group->key.objectid; have_block_group: - if (unlikely(!block_group->cached)) { - mutex_lock(&block_group->cache_mutex); - ret = cache_block_group(root, block_group); - mutex_unlock(&block_group->cache_mutex); - if (ret) { - btrfs_put_block_group(block_group); - break; + if (unlikely(block_group->cached == BTRFS_CACHE_NO)) { + /* + * we want to start caching kthreads, but not too many + * right off the bat so we don't overwhelm the system, + * so only start them if there are less than 2 and we're + * in the initial allocation phase. 
+ */ + if (loop > LOOP_CACHING_NOWAIT || + atomic_read(&space_info->caching_threads) < 2) { + ret = cache_block_group(block_group); + BUG_ON(ret); } } + cached = block_group_cache_done(block_group); + if (unlikely(!cached)) { + found_uncached_bg = true; + + /* if we only want cached bgs, loop */ + if (loop == LOOP_CACHED_ONLY) + goto loop; + } + if (unlikely(block_group->ro)) goto loop; @@ -3627,14 +3855,21 @@ refill_cluster: spin_unlock(&last_ptr->refill_lock); goto checks; } + } else if (!cached && loop > LOOP_CACHING_NOWAIT) { + spin_unlock(&last_ptr->refill_lock); + + wait_block_group_cache_progress(block_group, + num_bytes + empty_cluster + empty_size); + goto have_block_group; } + /* * at this point we either didn't find a cluster * or we weren't able to allocate a block from our * cluster. Free the cluster we've been trying * to use, and go to the next block group */ - if (loop < 2) { + if (loop < LOOP_NO_EMPTY_SIZE) { btrfs_return_cluster_to_free_space(NULL, last_ptr); spin_unlock(&last_ptr->refill_lock); @@ -3645,8 +3880,15 @@ refill_cluster: offset = btrfs_find_space_for_alloc(block_group, search_start, num_bytes, empty_size); - if (!offset) + if (!offset && (cached || (!cached && + loop == LOOP_CACHING_NOWAIT))) { goto loop; + } else if (!offset && (!cached && + loop > LOOP_CACHING_NOWAIT)) { + wait_block_group_cache_progress(block_group, + num_bytes + empty_size); + goto have_block_group; + } checks: search_start = stripe_align(root, offset); /* move on to the next group */ @@ -3694,13 +3936,26 @@ loop: } up_read(&space_info->groups_sem); - /* loop == 0, try to find a clustered alloc in every block group - * loop == 1, try again after forcing a chunk allocation - * loop == 2, set empty_size and empty_cluster to 0 and try again + /* LOOP_CACHED_ONLY, only search fully cached block groups + * LOOP_CACHING_NOWAIT, search partially cached block groups, but + * dont wait foR them to finish caching + * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching + * LOOP_ALLOC_CHUNK, force a chunk allocation and try again + * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try + * again */ - if (!ins->objectid && loop < 3 && - (empty_size || empty_cluster || allowed_chunk_alloc)) { - if (loop >= 2) { + if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE && + (found_uncached_bg || empty_size || empty_cluster || + allowed_chunk_alloc)) { + if (found_uncached_bg) { + found_uncached_bg = false; + if (loop < LOOP_CACHING_WAIT) { + loop++; + goto search; + } + } + + if (loop == LOOP_ALLOC_CHUNK) { empty_size = 0; empty_cluster = 0; } @@ -3713,7 +3968,7 @@ loop: space_info->force_alloc = 1; } - if (loop < 3) { + if (loop < LOOP_NO_EMPTY_SIZE) { loop++; goto search; } @@ -3809,7 +4064,7 @@ again: num_bytes, data, 1); goto again; } - if (ret) { + if (ret == -ENOSPC) { struct btrfs_space_info *sinfo; sinfo = __find_space_info(root->fs_info, data); @@ -3817,7 +4072,6 @@ again: "wanted %llu\n", (unsigned long long)data, (unsigned long long)num_bytes); dump_space_info(sinfo, num_bytes); - BUG(); } return ret; @@ -3855,7 +4109,9 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans, ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size, empty_size, hint_byte, search_end, ins, data); - update_reserved_extents(root, ins->objectid, ins->offset, 1); + if (!ret) + update_reserved_extents(root, ins->objectid, ins->offset, 1); + return ret; } @@ -4017,9 +4273,9 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, struct btrfs_block_group_cache 
*block_group; block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); - mutex_lock(&block_group->cache_mutex); - cache_block_group(root, block_group); - mutex_unlock(&block_group->cache_mutex); + cache_block_group(block_group); + wait_event(block_group->caching_q, + block_group_cache_done(block_group)); ret = btrfs_remove_free_space(block_group, ins->objectid, ins->offset); @@ -4050,7 +4306,8 @@ static int alloc_tree_block(struct btrfs_trans_handle *trans, ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes, empty_size, hint_byte, search_end, ins, 0); - BUG_ON(ret); + if (ret) + return ret; if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { if (parent == 0) @@ -6966,11 +7223,16 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) &info->block_group_cache_tree); spin_unlock(&info->block_group_cache_lock); - btrfs_remove_free_space_cache(block_group); down_write(&block_group->space_info->groups_sem); list_del(&block_group->list); up_write(&block_group->space_info->groups_sem); + if (block_group->cached == BTRFS_CACHE_STARTED) + wait_event(block_group->caching_q, + block_group_cache_done(block_group)); + + btrfs_remove_free_space_cache(block_group); + WARN_ON(atomic_read(&block_group->count) != 1); kfree(block_group); @@ -7036,10 +7298,10 @@ int btrfs_read_block_groups(struct btrfs_root *root) atomic_set(&cache->count, 1); spin_lock_init(&cache->lock); spin_lock_init(&cache->tree_lock); - mutex_init(&cache->cache_mutex); + cache->fs_info = info; + init_waitqueue_head(&cache->caching_q); INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); - cache->sectorsize = root->sectorsize; /* * we only want to have 32k of ram per block group for keeping @@ -7057,6 +7319,26 @@ int btrfs_read_block_groups(struct btrfs_root *root) key.objectid = found_key.objectid + found_key.offset; btrfs_release_path(root, path); cache->flags = btrfs_block_group_flags(&cache->item); + cache->sectorsize = root->sectorsize; + + remove_sb_from_cache(root, cache); + + /* + * check for two cases, either we are full, and therefore + * don't need to bother with the caching work since we won't + * find any space, or we are empty, and we can just add all + * the space in and be done with it. This saves us _alot_ of + * time, particularly in the full case. 
+ */ + if (found_key.offset == btrfs_block_group_used(&cache->item)) { + cache->cached = BTRFS_CACHE_FINISHED; + } else if (btrfs_block_group_used(&cache->item) == 0) { + cache->cached = BTRFS_CACHE_FINISHED; + add_new_free_space(cache, root->fs_info, + found_key.objectid, + found_key.objectid + + found_key.offset); + } ret = update_space_info(info, cache->flags, found_key.offset, btrfs_block_group_used(&cache->item), @@ -7112,7 +7394,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, atomic_set(&cache->count, 1); spin_lock_init(&cache->lock); spin_lock_init(&cache->tree_lock); - mutex_init(&cache->cache_mutex); + init_waitqueue_head(&cache->caching_q); INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); @@ -7121,11 +7403,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, cache->flags = type; btrfs_set_block_group_flags(&cache->item, type); - cache->cached = 1; - ret = btrfs_add_free_space(cache, chunk_offset, size); - BUG_ON(ret); + cache->cached = BTRFS_CACHE_FINISHED; remove_sb_from_cache(root, cache); + add_new_free_space(cache, root->fs_info, chunk_offset, + chunk_offset + size); + ret = update_space_info(root->fs_info, cache->flags, size, bytes_used, &cache->space_info); BUG_ON(ret); @@ -7184,7 +7467,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, rb_erase(&block_group->cache_node, &root->fs_info->block_group_cache_tree); spin_unlock(&root->fs_info->block_group_cache_lock); - btrfs_remove_free_space_cache(block_group); + down_write(&block_group->space_info->groups_sem); /* * we must use list_del_init so people can check to see if they @@ -7193,6 +7476,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, list_del_init(&block_group->list); up_write(&block_group->space_info->groups_sem); + if (block_group->cached == BTRFS_CACHE_STARTED) + wait_event(block_group->caching_q, + block_group_cache_done(block_group)); + + btrfs_remove_free_space_cache(block_group); + spin_lock(&block_group->space_info->lock); block_group->space_info->total_bytes -= block_group->key.offset; block_group->space_info->bytes_readonly -= block_group->key.offset; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index ab8cad8b46c..af99b78b288 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -238,6 +238,7 @@ static void unlink_free_space(struct btrfs_block_group_cache *block_group, { rb_erase(&info->offset_index, &block_group->free_space_offset); block_group->free_extents--; + block_group->free_space -= info->bytes; } static int link_free_space(struct btrfs_block_group_cache *block_group, @@ -251,6 +252,7 @@ static int link_free_space(struct btrfs_block_group_cache *block_group, if (ret) return ret; + block_group->free_space += info->bytes; block_group->free_extents++; return ret; } @@ -285,36 +287,40 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) } } -static void bitmap_clear_bits(struct btrfs_free_space *info, u64 offset, u64 bytes, - u64 sectorsize) +static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info, u64 offset, + u64 bytes) { unsigned long start, end; unsigned long i; - start = offset_to_bit(info->offset, sectorsize, offset); - end = start + bytes_to_bits(bytes, sectorsize); + start = offset_to_bit(info->offset, block_group->sectorsize, offset); + end = start + bytes_to_bits(bytes, block_group->sectorsize); BUG_ON(end > BITS_PER_BITMAP); for (i = start; i < end; i++) clear_bit(i, 
info->bitmap); info->bytes -= bytes; + block_group->free_space -= bytes; } -static void bitmap_set_bits(struct btrfs_free_space *info, u64 offset, u64 bytes, - u64 sectorsize) +static void bitmap_set_bits(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info, u64 offset, + u64 bytes) { unsigned long start, end; unsigned long i; - start = offset_to_bit(info->offset, sectorsize, offset); - end = start + bytes_to_bits(bytes, sectorsize); + start = offset_to_bit(info->offset, block_group->sectorsize, offset); + end = start + bytes_to_bits(bytes, block_group->sectorsize); BUG_ON(end > BITS_PER_BITMAP); for (i = start; i < end; i++) set_bit(i, info->bitmap); info->bytes += bytes; + block_group->free_space += bytes; } static int search_bitmap(struct btrfs_block_group_cache *block_group, @@ -414,13 +420,12 @@ again: (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1; if (*offset > bitmap_info->offset && *offset + *bytes > end) { - bitmap_clear_bits(bitmap_info, *offset, - end - *offset + 1, block_group->sectorsize); + bitmap_clear_bits(block_group, bitmap_info, *offset, + end - *offset + 1); *bytes -= end - *offset + 1; *offset = end + 1; } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) { - bitmap_clear_bits(bitmap_info, *offset, - *bytes, block_group->sectorsize); + bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes); *bytes = 0; } @@ -495,14 +500,13 @@ again: (u64)(BITS_PER_BITMAP * block_group->sectorsize); if (offset >= bitmap_info->offset && offset + bytes > end) { - bitmap_set_bits(bitmap_info, offset, end - offset, - block_group->sectorsize); + bitmap_set_bits(block_group, bitmap_info, offset, + end - offset); bytes -= end - offset; offset = end; added = 0; } else if (offset >= bitmap_info->offset && offset + bytes <= end) { - bitmap_set_bits(bitmap_info, offset, bytes, - block_group->sectorsize); + bitmap_set_bits(block_group, bitmap_info, offset, bytes); bytes = 0; } else { BUG(); @@ -870,8 +874,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, ret = offset; if (entry->bitmap) { - bitmap_clear_bits(entry, offset, bytes, - block_group->sectorsize); + bitmap_clear_bits(block_group, entry, offset, bytes); if (!entry->bytes) { unlink_free_space(block_group, entry); kfree(entry->bitmap); @@ -891,6 +894,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, out: spin_unlock(&block_group->tree_lock); + return ret; } @@ -967,7 +971,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, goto out; ret = search_start; - bitmap_clear_bits(entry, ret, bytes, block_group->sectorsize); + bitmap_clear_bits(block_group, entry, ret, bytes); out: spin_unlock(&cluster->lock); spin_unlock(&block_group->tree_lock); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 81f7124c305..32454d1c566 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -40,6 +40,14 @@ static noinline void put_transaction(struct btrfs_transaction *transaction) } } +static noinline void switch_commit_root(struct btrfs_root *root) +{ + down_write(&root->commit_root_sem); + free_extent_buffer(root->commit_root); + root->commit_root = btrfs_root_node(root); + up_write(&root->commit_root_sem); +} + /* * either allocate a new transaction or hop into the existing one */ @@ -458,8 +466,7 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, ret = btrfs_write_dirty_block_groups(trans, root); BUG_ON(ret); } - free_extent_buffer(root->commit_root); - 
root->commit_root = btrfs_root_node(root); + switch_commit_root(root); return 0; } @@ -537,8 +544,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, btrfs_update_reloc_root(trans, root); if (root->commit_root != root->node) { - free_extent_buffer(root->commit_root); - root->commit_root = btrfs_root_node(root); + switch_commit_root(root); btrfs_set_root_node(&root->root_item, root->node); } @@ -1002,15 +1008,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, btrfs_set_root_node(&root->fs_info->tree_root->root_item, root->fs_info->tree_root->node); - free_extent_buffer(root->fs_info->tree_root->commit_root); - root->fs_info->tree_root->commit_root = - btrfs_root_node(root->fs_info->tree_root); + switch_commit_root(root->fs_info->tree_root); btrfs_set_root_node(&root->fs_info->chunk_root->root_item, root->fs_info->chunk_root->node); - free_extent_buffer(root->fs_info->chunk_root->commit_root); - root->fs_info->chunk_root->commit_root = - btrfs_root_node(root->fs_info->chunk_root); + switch_commit_root(root->fs_info->chunk_root); update_super_roots(root); @@ -1050,6 +1052,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, cur_trans->commit_done = 1; root->fs_info->last_trans_committed = cur_trans->transid; + wake_up(&cur_trans->commit_wait); put_transaction(cur_trans); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index c13922206d1..19560686261 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -264,7 +264,7 @@ static int process_one_buffer(struct btrfs_root *log, { if (wc->pin) btrfs_update_pinned_extents(log->fs_info->extent_root, - eb->start, eb->len, 1); + eb->start, eb->len, 1, 0); if (btrfs_buffer_uptodate(eb, gen)) { if (wc->write) -- cgit v1.2.3 From 20736abaa361bea488df6a1f66f6b37fb01107b9 Mon Sep 17 00:00:00 2001 From: Diego Calleja Date: Fri, 24 Jul 2009 11:06:52 -0400 Subject: Btrfs: Remove code duplication in comp_keys comp_keys is duplicating what is done in btrfs_comp_cpu_keys, so just call it. Signed-off-by: Diego Calleja Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index fdd423a550d..91572091c0a 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -557,19 +557,7 @@ static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2) btrfs_disk_key_to_cpu(&k1, disk); - if (k1.objectid > k2->objectid) - return 1; - if (k1.objectid < k2->objectid) - return -1; - if (k1.type > k2->type) - return 1; - if (k1.type < k2->type) - return -1; - if (k1.offset > k2->offset) - return 1; - if (k1.offset < k2->offset) - return -1; - return 0; + return btrfs_comp_cpu_keys(&k1, k2); } /* -- cgit v1.2.3 From 1fcbac581be375ca0a686f72ee2b7fd1dbf386e7 Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Fri, 24 Jul 2009 11:06:53 -0400 Subject: Btrfs: find_free_dev_extent doesn't handle holes at the start of the device find_free_dev_extent does not properly handle the case where the device is not completely free, and there is a free extent at the beginning of the device.
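The bug class is worth spelling out before the diff: a scanner that only measures the gaps between existing extents never considers the gap in front of the first one. A self-contained userspace sketch of a scan that does handle the leading hole (hypothetical types, not the btrfs code; the actual fix is the btrfs_previous_item handling in the volumes.c hunk below):

	#include <stdint.h>
	#include <stddef.h>

	struct extent { uint64_t start, len; };	/* allocated, sorted by start */

	/* offset of the first hole >= num_bytes, or UINT64_MAX if none */
	static uint64_t find_free(const struct extent *e, size_t n,
				  uint64_t dev_size, uint64_t num_bytes)
	{
		uint64_t last_end = 0;	/* starting at 0 covers the leading hole */

		for (size_t i = 0; i < n; i++) {
			if (e[i].start > last_end &&
			    e[i].start - last_end >= num_bytes)
				return last_end;
			if (e[i].start + e[i].len > last_end)
				last_end = e[i].start + e[i].len;
		}
		if (dev_size > last_end && dev_size - last_end >= num_bytes)
			return last_end;
		return UINT64_MAX;
	}

Initializing last_end to 0, rather than to the end of the first extent found, is the whole point; skipping that step reproduces the bug being fixed.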
Signed-off-by: Yan Zheng Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 55c37276a29..074c1c56d8c 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -758,9 +758,13 @@ static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans, ret = btrfs_search_slot(trans, root, &key, path, 0, 0); if (ret < 0) goto error; - ret = btrfs_previous_item(root, path, 0, key.type); - if (ret < 0) - goto error; + if (ret > 0) { + ret = btrfs_previous_item(root, path, key.objectid, key.type); + if (ret < 0) + goto error; + if (ret > 0) + start_found = 1; + } l = path->nodes[0]; btrfs_item_key_to_cpu(l, &key, path->slots[0]); while (1) { -- cgit v1.2.3 From 0a4eefbb745ec0e8a5b694ae3f40cc34082d8f61 Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Fri, 24 Jul 2009 11:06:53 -0400 Subject: Btrfs: Fix ordering of key field checks in btrfs_previous_item Check objectid of item before checking the item type, otherwise we may return zero for a key that is actually too low. Signed-off-by: Yan Zheng Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 91572091c0a..978449af4cc 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -4296,10 +4296,10 @@ int btrfs_previous_item(struct btrfs_root *root, path->slots[0]--; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.type == type) - return 0; if (found_key.objectid < min_objectid) break; + if (found_key.type == type) + return 0; if (found_key.objectid == min_objectid && found_key.type < type) break; -- cgit v1.2.3 From d717aa1d31c36cb56059e97966cb76f0be021969 Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Fri, 24 Jul 2009 12:42:46 -0400 Subject: Btrfs: Avoid delayed reference update looping btrfs_split_leaf and btrfs_del_items can end up in a loop where one is constantly spliting a given leaf and the other is constantly merging it back with the adjacent nodes. There is a better fix for this, but in the interest of something small, this patch just changes btrfs_del_items back to balancing less often. Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 978449af4cc..3fdcc0512d3 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1040,9 +1040,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, BTRFS_NODEPTRS_PER_BLOCK(root) / 4) return 0; - if (btrfs_header_nritems(mid) > 2) - return 0; - if (btrfs_header_nritems(mid) < 2) err_on_enospc = 1; @@ -3796,7 +3793,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, } /* delete the leaf if it is mostly empty */ - if (used < BTRFS_LEAF_DATA_SIZE(root) / 2) { + if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) { /* push_leaf_left fixes the path. * make sure the path still points to our leaf * for possible call to del_ptr below -- cgit v1.2.3 From ebecd3d9d2adba144c15f1d35c78e0c26ead1bfd Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 24 Jul 2009 13:17:44 -0400 Subject: Btrfs: make flushoncommit mount option correctly wait on ordered_extents The commit_transaction call to wait_ordered_extents when snap_pending passes nocow_only=1 to process only NOCOW or PREALLOC extents. 
This isn't correct for the 'flushoncommit' mode, as it skips extents we just started IO on in start_delalloc_inodes. So, in the flushoncommit case, wait on all ordered extents. Otherwise, only pass the nocow_only flag to wait_ordered_extents if snap_pending. Signed-off-by: Sage Weil Signed-off-by: Chris Mason --- fs/btrfs/transaction.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 32454d1c566..e51d2bc532f 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -942,9 +942,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, mutex_unlock(&root->fs_info->trans_mutex); - if (flush_on_commit || snap_pending) { - if (flush_on_commit) - btrfs_start_delalloc_inodes(root); + if (flush_on_commit) { + btrfs_start_delalloc_inodes(root); + ret = btrfs_wait_ordered_extents(root, 0); + BUG_ON(ret); + } else if (snap_pending) { ret = btrfs_wait_ordered_extents(root, 1); BUG_ON(ret); } -- cgit v1.2.3 From 283bb1979fa8580c4037d8df251449368c292a3b Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 24 Jul 2009 16:30:55 -0400 Subject: Btrfs: clear all space_info->full after removing a block group Btrfs allocates individual extents from block groups, and each block group has a specific type. It may hold metadata, data mirrored or striped etc. When we balance space (btrfs-vol -b) or remove a drive (btrfs-vol -r) we free block groups. Once a block group is freed, the space it was using on the device may be available for use by new block groups. btrfs_remove_block_group was clearing the flag that said 'our devices are full, don't even try to allocate new block groups', but it was only clearing that flag for a specific type of block group. This commit clears the full flag for all of the types of block groups, making it much more likely that we'll be able to balance space when the drive is close to full. Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9a489cc89fd..508df5f7d2e 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7486,7 +7486,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, block_group->space_info->total_bytes -= block_group->key.offset; block_group->space_info->bytes_readonly -= block_group->key.offset; spin_unlock(&block_group->space_info->lock); - block_group->space_info->full = 0; + + btrfs_clear_space_info_full(root->fs_info); btrfs_put_block_group(block_group); btrfs_put_block_group(block_group); -- cgit v1.2.3 From 9779b72f0584fd53e0de53f62f205bf0dc0db553 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 24 Jul 2009 16:41:41 -0400 Subject: Btrfs: find smallest available device extent during chunk allocation Allocating new block group is easy when the disk has plenty of space. But things get difficult as the disk fills up, especially if the FS has been run through btrfs-vol -b. The balance operation is likely to make the total bytes available on the device greater than the largest extent we'll actually be able to allocate. But the device extent allocation code incorrectly assumes that a device with 5G free will be able to allocate a 5G extent. It isn't normally a problem because device extents don't get freed unless btrfs-vol -b is run. This fixes the device extent allocator to remember the largest free extent it can find, and then uses that value as a fallback. 
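Before the diff, a sketch of the fallback idea: while scanning, remember the largest hole seen, so a caller whose request fails can immediately retry at a size that is known to fit. This extends the hypothetical find_free() example given earlier (same assumed struct extent; the real change is the max_avail plumbing in the volumes.c hunk below):

	#include <stdint.h>
	#include <stddef.h>

	struct extent { uint64_t start, len; };	/* as in the earlier sketch */

	static uint64_t find_free_track(const struct extent *e, size_t n,
					uint64_t dev_size, uint64_t num_bytes,
					uint64_t *max_avail)
	{
		uint64_t last_end = 0, best = UINT64_MAX;

		for (size_t i = 0; i <= n; i++) {
			/* the final boundary is the end of the device */
			uint64_t next = (i < n) ? e[i].start : dev_size;

			if (next > last_end) {
				uint64_t hole = next - last_end;

				if (hole > *max_avail)
					*max_avail = hole;  /* remember fallback */
				if (hole >= num_bytes && best == UINT64_MAX)
					best = last_end;
			}
			if (i < n && e[i].start + e[i].len > last_end)
				last_end = e[i].start + e[i].len;
		}
		return best;	/* on UINT64_MAX, retry using *max_avail */
	}

The caller zeroes *max_avail before the scan, mirroring the max_avail = 0 reset at the again: label in the hunk below.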
Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 074c1c56d8c..5dbefd11b4a 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -721,7 +721,8 @@ error: */ static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans, struct btrfs_device *device, - u64 num_bytes, u64 *start) + u64 num_bytes, u64 *start, + u64 *max_avail) { struct btrfs_key key; struct btrfs_root *root = device->dev_root; @@ -807,6 +808,10 @@ no_more_items: if (last_byte < search_start) last_byte = search_start; hole_size = key.offset - last_byte; + + if (hole_size > *max_avail) + *max_avail = hole_size; + if (key.offset > last_byte && hole_size >= num_bytes) { *start = last_byte; @@ -1625,6 +1630,7 @@ static int __btrfs_grow_device(struct btrfs_trans_handle *trans, device->fs_devices->total_rw_bytes += diff; device->total_bytes = new_size; + device->disk_total_bytes = new_size; btrfs_clear_space_info_full(device->dev_root->fs_info); return btrfs_update_device(trans, device); @@ -2175,6 +2181,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, max_chunk_size); again: + max_avail = 0; if (!map || map->num_stripes != num_stripes) { kfree(map); map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); @@ -2223,7 +2230,8 @@ again: if (device->in_fs_metadata && avail >= min_free) { ret = find_free_dev_extent(trans, device, - min_free, &dev_offset); + min_free, &dev_offset, + &max_avail); if (ret == 0) { list_move_tail(&device->dev_alloc_list, &private_devs); -- cgit v1.2.3 From 631c07c8d12bcc6ce4a0fbfbd64ea843d78e2b10 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 27 Jul 2009 13:57:00 -0400 Subject: Btrfs: Correct redundant test in add_inode_ref dir has already been tested. It seems that this test should be on the recently returned value inode. A simplified version of the semantic match that finds this problem is as follows: (http://www.emn.fr/x-info/coccinelle/) Signed-off-by: Julia Lawall Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 19560686261..11d0787c618 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -797,7 +797,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, return -ENOENT; inode = read_one_inode(root, key->objectid); - BUG_ON(!dir); + BUG_ON(!inode); ref_ptr = btrfs_item_ptr_offset(eb, slot); ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); -- cgit v1.2.3 From 68b38550ddbea13d296184bf69edff387618b1d3 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 27 Jul 2009 13:57:01 -0400 Subject: Btrfs: change how we unpin extents We are racy with async block caching and unpinning extents. This patch makes things much less complicated by only unpinning the extent if the block group is cached. We check the block_group->cached var under the block_group->lock spin lock. If it is set to BTRFS_CACHE_FINISHED then we update the pinned counters, and unpin the extent and add the free space back. If it is not set to this, we start the caching of the block group so the next time we unpin extents we can unpin the extent. This keeps us from racing with the async caching threads, lets us kill the fs wide async thread counter, and keeps us from having to set DELALLOC bits for every extent we hit if there are caching kthreads going. 
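The rule just described is a single decision made under the block group's spinlock. A condensed sketch of the pattern, boiled down from the btrfs_update_pinned_extents() hunk below (fields simplified, counters and error handling dropped):

	static void unpin_sketch(struct btrfs_block_group_cache *cache,
				 u64 bytenr, u64 len)
	{
		int unpin;

		spin_lock(&cache->lock);
		/* decided under the lock, so it cannot race with the caching
		 * kthread flipping ->cached to BTRFS_CACHE_FINISHED */
		unpin = (cache->cached == BTRFS_CACHE_FINISHED);
		if (unpin)
			cache->pinned -= len;
		spin_unlock(&cache->lock);

		if (unpin)
			btrfs_add_free_space(cache, bytenr, len);
		else
			cache_block_group(cache);  /* a later pass can free it */
	}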
One thing that needed to be changed was btrfs_free_super_mirror_extents. Now instead of just looking for LOCKED extents, we also look for DIRTY extents, since we could have left some extents pinned in the previous transaction that will never get freed now that we are unmounting, which would cause us to leak memory. So btrfs_free_super_mirror_extents has been changed to btrfs_free_pinned_extents, and it will clear the extents locked for the super mirror, and any remaining pinned extents that may be present. Thank you, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 5 +- fs/btrfs/disk-io.c | 3 +- fs/btrfs/extent-tree.c | 149 ++++++++++++++----------------------------------- fs/btrfs/tree-log.c | 2 +- 4 files changed, 46 insertions(+), 113 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 42b03c4ee49..17ad92c29cf 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -845,7 +845,6 @@ struct btrfs_fs_info { atomic_t async_submit_draining; atomic_t nr_async_bios; atomic_t async_delalloc_pages; - atomic_t async_caching_threads; /* * this is used by the balancing code to wait for all the pending @@ -1926,7 +1925,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_root *root, unsigned long count); int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len); int btrfs_update_pinned_extents(struct btrfs_root *root, - u64 bytenr, u64 num, int pin, int mark_free); + u64 bytenr, u64 num, int pin); int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *leaf); int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, @@ -2011,7 +2010,7 @@ void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode, u64 bytes); void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode, u64 bytes); -void btrfs_free_super_mirror_extents(struct btrfs_fs_info *info); +void btrfs_free_pinned_extents(struct btrfs_fs_info *info); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, int level, int *slot); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index ec2c915f7f4..c658397c747 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1567,7 +1567,6 @@ struct btrfs_root *open_ctree(struct super_block *sb, atomic_set(&fs_info->async_delalloc_pages, 0); atomic_set(&fs_info->async_submit_draining, 0); atomic_set(&fs_info->nr_async_bios, 0); - atomic_set(&fs_info->async_caching_threads, 0); fs_info->sb = sb; fs_info->max_extent = (u64)-1; fs_info->max_inline = 8192 * 1024; @@ -2339,7 +2338,7 @@ int close_ctree(struct btrfs_root *root) free_extent_buffer(root->fs_info->csum_root->commit_root); btrfs_free_block_groups(root->fs_info); - btrfs_free_super_mirror_extents(root->fs_info); + btrfs_free_pinned_extents(root->fs_info); del_fs_roots(fs_info); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 508df5f7d2e..08188f1615d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -153,18 +153,26 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr, return ret; } -void btrfs_free_super_mirror_extents(struct btrfs_fs_info *info) +/* + * We always set EXTENT_LOCKED for the super mirror extents so we don't + * overwrite them, so those bits need to be unset. Also, if we are unmounting + * with pinned extents still sitting there because we had a block group caching, + * we need to clear those now, since we are done. 
+ */ +void btrfs_free_pinned_extents(struct btrfs_fs_info *info) { u64 start, end, last = 0; int ret; while (1) { ret = find_first_extent_bit(&info->pinned_extents, last, - &start, &end, EXTENT_LOCKED); + &start, &end, + EXTENT_LOCKED|EXTENT_DIRTY); if (ret) break; - unlock_extent(&info->pinned_extents, start, end, GFP_NOFS); + clear_extent_bits(&info->pinned_extents, start, end, + EXTENT_LOCKED|EXTENT_DIRTY, GFP_NOFS); last = end+1; } } @@ -209,8 +217,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group, while (start < end) { ret = find_first_extent_bit(&info->pinned_extents, start, &extent_start, &extent_end, - EXTENT_DIRTY|EXTENT_LOCKED| - EXTENT_DELALLOC); + EXTENT_DIRTY|EXTENT_LOCKED); if (ret) break; @@ -238,67 +245,6 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group, return total_added; } -DEFINE_MUTEX(discard_mutex); - -/* - * if async kthreads are running when we cross transactions, we mark any pinned - * extents with EXTENT_DELALLOC and then let the caching kthreads clean up those - * extents when they are done. Also we run this from btrfs_finish_extent_commit - * in case there were some pinned extents that were missed because we had - * already cached that block group. - */ -static void btrfs_discard_pinned_extents(struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *cache) -{ - u64 start, end, last; - int ret; - - if (!cache) - last = 0; - else - last = cache->key.objectid; - - mutex_lock(&discard_mutex); - while (1) { - ret = find_first_extent_bit(&fs_info->pinned_extents, last, - &start, &end, EXTENT_DELALLOC); - if (ret) - break; - - if (cache && start >= cache->key.objectid + cache->key.offset) - break; - - - if (!cache) { - cache = btrfs_lookup_block_group(fs_info, start); - BUG_ON(!cache); - - start = max(start, cache->key.objectid); - end = min(end, cache->key.objectid + cache->key.offset - 1); - - if (block_group_cache_done(cache)) - btrfs_add_free_space(cache, start, - end - start + 1); - cache = NULL; - } else { - start = max(start, cache->key.objectid); - end = min(end, cache->key.objectid + cache->key.offset - 1); - btrfs_add_free_space(cache, start, end - start + 1); - } - - clear_extent_bits(&fs_info->pinned_extents, start, end, - EXTENT_DELALLOC, GFP_NOFS); - last = end + 1; - - if (need_resched()) { - mutex_unlock(&discard_mutex); - cond_resched(); - mutex_lock(&discard_mutex); - } - } - mutex_unlock(&discard_mutex); -} - static int caching_kthread(void *data) { struct btrfs_block_group_cache *block_group = data; @@ -317,7 +263,6 @@ static int caching_kthread(void *data) if (!path) return -ENOMEM; - atomic_inc(&fs_info->async_caching_threads); atomic_inc(&block_group->space_info->caching_threads); last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); again: @@ -399,13 +344,9 @@ next: err: btrfs_free_path(path); up_read(&fs_info->extent_root->commit_root_sem); - atomic_dec(&fs_info->async_caching_threads); atomic_dec(&block_group->space_info->caching_threads); wake_up(&block_group->caching_q); - if (!ret) - btrfs_discard_pinned_extents(fs_info, block_group); - return 0; } @@ -1867,7 +1808,7 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans, BUG_ON(ret); } btrfs_update_pinned_extents(root, node->bytenr, - node->num_bytes, 1, 0); + node->num_bytes, 1); update_reserved_extents(root, node->bytenr, node->num_bytes, 0); } @@ -3100,19 +3041,15 @@ static u64 first_logical_byte(struct btrfs_root *root, u64 search_start) } int btrfs_update_pinned_extents(struct btrfs_root 
*root, - u64 bytenr, u64 num, int pin, int mark_free) + u64 bytenr, u64 num, int pin) { u64 len; struct btrfs_block_group_cache *cache; struct btrfs_fs_info *fs_info = root->fs_info; - if (pin) { + if (pin) set_extent_dirty(&fs_info->pinned_extents, bytenr, bytenr + num - 1, GFP_NOFS); - } else { - clear_extent_dirty(&fs_info->pinned_extents, - bytenr, bytenr + num - 1, GFP_NOFS); - } while (num > 0) { cache = btrfs_lookup_block_group(fs_info, bytenr); @@ -3128,14 +3065,34 @@ int btrfs_update_pinned_extents(struct btrfs_root *root, spin_unlock(&cache->space_info->lock); fs_info->total_pinned += len; } else { + int unpin = 0; + + /* + * in order to not race with the block group caching, we + * only want to unpin the extent if we are cached. If + * we aren't cached, we want to start async caching this + * block group so we can free the extent the next time + * around. + */ spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); - cache->pinned -= len; - cache->space_info->bytes_pinned -= len; + unpin = (cache->cached == BTRFS_CACHE_FINISHED); + if (likely(unpin)) { + cache->pinned -= len; + cache->space_info->bytes_pinned -= len; + fs_info->total_pinned -= len; + } spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); - fs_info->total_pinned -= len; - if (block_group_cache_done(cache) && mark_free) + + if (likely(unpin)) + clear_extent_dirty(&fs_info->pinned_extents, + bytenr, bytenr + len -1, + GFP_NOFS); + else + cache_block_group(cache); + + if (unpin) btrfs_add_free_space(cache, bytenr, len); } btrfs_put_block_group(cache); @@ -3181,27 +3138,15 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy) u64 last = 0; u64 start; u64 end; - bool caching_kthreads = false; struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents; int ret; - if (atomic_read(&root->fs_info->async_caching_threads)) - caching_kthreads = true; - while (1) { ret = find_first_extent_bit(pinned_extents, last, &start, &end, EXTENT_DIRTY); if (ret) break; - /* - * we need to make sure that the pinned extents don't go away - * while we are caching block groups - */ - if (unlikely(caching_kthreads)) - set_extent_delalloc(pinned_extents, start, end, - GFP_NOFS); - set_extent_dirty(copy, start, end, GFP_NOFS); last = end + 1; } @@ -3215,12 +3160,6 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, u64 start; u64 end; int ret; - int mark_free = 1; - - ret = find_first_extent_bit(&root->fs_info->pinned_extents, 0, - &start, &end, EXTENT_DELALLOC); - if (!ret) - mark_free = 0; while (1) { ret = find_first_extent_bit(unpin, 0, &start, &end, @@ -3231,16 +3170,12 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, ret = btrfs_discard_extent(root, start, end + 1 - start); /* unlocks the pinned mutex */ - btrfs_update_pinned_extents(root, start, end + 1 - start, 0, - mark_free); + btrfs_update_pinned_extents(root, start, end + 1 - start, 0); clear_extent_dirty(unpin, start, end, GFP_NOFS); cond_resched(); } - if (unlikely(!mark_free)) - btrfs_discard_pinned_extents(root->fs_info, NULL); - return ret; } @@ -3281,7 +3216,7 @@ static int pin_down_bytes(struct btrfs_trans_handle *trans, pinit: btrfs_set_path_blocking(path); /* unlocks the pinned mutex */ - btrfs_update_pinned_extents(root, bytenr, num_bytes, 1, 0); + btrfs_update_pinned_extents(root, bytenr, num_bytes, 1); BUG_ON(err < 0); return 0; @@ -3592,7 +3527,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, if (root_objectid == BTRFS_TREE_LOG_OBJECTID) { WARN_ON(owner >= 
BTRFS_FIRST_FREE_OBJECTID); /* unlocks the pinned mutex */ - btrfs_update_pinned_extents(root, bytenr, num_bytes, 1, 0); + btrfs_update_pinned_extents(root, bytenr, num_bytes, 1); update_reserved_extents(root, bytenr, num_bytes, 0); ret = 0; } else if (owner < BTRFS_FIRST_FREE_OBJECTID) { diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 11d0787c618..d91b0de7c50 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -264,7 +264,7 @@ static int process_one_buffer(struct btrfs_root *log, { if (wc->pin) btrfs_update_pinned_extents(log->fs_info->extent_root, - eb->start, eb->len, 1, 0); + eb->start, eb->len, 1); if (btrfs_buffer_uptodate(eb, gen)) { if (wc->write) -- cgit v1.2.3 From 7b91e2661addd8e2419cb45f6a322aa5dab9bcee Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 23 Jul 2009 15:22:30 -0400 Subject: cifs: fix error handling in mount-time DFS referral chasing code If the referral is malformed or the hostname can't be resolved, then the current code generates an oops. Fix it to handle these errors gracefully. Reported-by: Sandro Mathys Acked-by: Igor Mammedov CC: Stable Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifs_dfs_ref.c | 12 +++++++++--- fs/cifs/connect.c | 13 ++++++++++--- 2 files changed, 19 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 3bb11be8b6a..606912d8f2a 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -55,7 +55,7 @@ void cifs_dfs_release_automount_timer(void) * i.e. strips from UNC trailing path that is not part of share * name and fixup missing '\' in the begining of DFS node refferal * if neccessary. - * Returns pointer to share name on success or NULL on error. + * Returns pointer to share name on success or ERR_PTR on error. * Caller is responsible for freeing returned string. 
*/ static char *cifs_get_share_name(const char *node_name) @@ -68,7 +68,7 @@ static char *cifs_get_share_name(const char *node_name) UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */, GFP_KERNEL); if (!UNC) - return NULL; + return ERR_PTR(-ENOMEM); /* get share name and server name */ if (node_name[1] != '\\') { @@ -87,7 +87,7 @@ static char *cifs_get_share_name(const char *node_name) cERROR(1, ("%s: no server name end in node name: %s", __func__, node_name)); kfree(UNC); - return NULL; + return ERR_PTR(-EINVAL); } /* find sharename end */ @@ -133,6 +133,12 @@ char *cifs_compose_mount_options(const char *sb_mountdata, return ERR_PTR(-EINVAL); *devname = cifs_get_share_name(ref->node_name); + if (IS_ERR(*devname)) { + rc = PTR_ERR(*devname); + *devname = NULL; + goto compose_mount_options_err; + } + rc = dns_resolve_server_name_to_ip(*devname, &srvIP); if (rc != 0) { cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d", diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index fc44d316d0b..f2486889b7b 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2544,11 +2544,20 @@ remote_path_check: if (mount_data != mount_data_global) kfree(mount_data); + mount_data = cifs_compose_mount_options( cifs_sb->mountdata, full_path + 1, referrals, &fake_devname); - kfree(fake_devname); + free_dfs_info_array(referrals, num_referrals); + kfree(fake_devname); + kfree(full_path); + + if (IS_ERR(mount_data)) { + rc = PTR_ERR(mount_data); + mount_data = NULL; + goto mount_fail_check; + } if (tcon) cifs_put_tcon(tcon); @@ -2556,8 +2565,6 @@ remote_path_check: cifs_put_smb_ses(pSesInfo); cleanup_volume_info(&volume_info); - FreeXid(xid); - kfree(full_path); referral_walks_count++; goto try_mount_again; } -- cgit v1.2.3 From f25784b35f590c81d5fb8245a8cd45e1afb6f1b2 Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Tue, 28 Jul 2009 08:41:57 -0400 Subject: Btrfs: Fix async caching interaction with unmount - don't stop the caching thread until btrfs_commit_super return. - if caching is interrupted by umount, set last to (u64)-1. otherwise the un-scanned range of block group will be considered as free extent. Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 3 +++ fs/btrfs/extent-tree.c | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index c658397c747..3a9b8875988 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2317,6 +2317,9 @@ int close_ctree(struct btrfs_root *root) printk(KERN_ERR "btrfs: commit super ret %d\n", ret); } + fs_info->closing = 2; + smp_mb(); + if (fs_info->delalloc_bytes) { printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n", (unsigned long long)fs_info->delalloc_bytes); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 08188f1615d..fadf69a2764 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -288,8 +288,10 @@ again: while (1) { smp_mb(); - if (block_group->fs_info->closing) + if (block_group->fs_info->closing > 1) { + last = (u64)-1; break; + } leaf = path->nodes[0]; slot = path->slots[0]; -- cgit v1.2.3 From 0f58b44582001c8bcdb75f36cf85ebbe5170e959 Mon Sep 17 00:00:00 2001 From: Peter Oberparleiter Date: Tue, 14 Jul 2009 17:56:15 +0200 Subject: sysfs: fix hardlink count on device_move Update directory hardlink count when moving kobjects to a new parent. 
Fixes the following problem which occurs when several devices are moved to the same parent and then unregistered: > ls -laF /sys/devices/css0/defunct/ > total 0 > drwxr-xr-x 4294967295 root root 0 2009-07-14 17:02 ./ > drwxr-xr-x 114 root root 0 2009-07-14 17:02 ../ > drwxr-xr-x 2 root root 0 2009-07-14 17:01 power/ > -rw-r--r-- 1 root root 4096 2009-07-14 17:01 uevent Signed-off-by: Peter Oberparleiter Cc: stable Signed-off-by: Greg Kroah-Hartman --- fs/sysfs/dir.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index d88d0fac9fa..14f2d71ea3c 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -939,8 +939,10 @@ again: /* Remove from old parent's list and insert into new parent's list. */ sysfs_unlink_sibling(sd); sysfs_get(new_parent_sd); + drop_nlink(old_parent->d_inode); sysfs_put(sd->s_parent); sd->s_parent = new_parent_sd; + inc_nlink(new_parent->d_inode); sysfs_link_sibling(sd); out_unlock: -- cgit v1.2.3 From 6352a29305373ae6196491e6d4669f301e26492e Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Tue, 28 Jul 2009 13:57:01 -0500 Subject: eCryptfs: Check Tag 11 literal data buffer size Tag 11 packets are stored in the metadata section of an eCryptfs file to store the key signature(s) used to encrypt the file encryption key. After extracting the packet length field to determine the key signature length, a check is not performed to see if the length would exceed the key signature buffer size that was passed into parse_tag_11_packet(). Thanks to Ramon de Carvalho Valle for finding this bug using fsfuzzer. Signed-off-by: Tyler Hicks Cc: stable@kernel.org (2.6.27 and 30) Signed-off-by: Linus Torvalds --- fs/ecryptfs/keystore.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs') diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index af737bb56cb..5414253d4c9 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -1449,6 +1449,12 @@ parse_tag_11_packet(unsigned char *data, unsigned char *contents, rc = -EINVAL; goto out; } + if (unlikely((*tag_11_contents_size) > max_contents_bytes)) { + printk(KERN_ERR "Literal data section in tag 11 packet exceeds " + "expected size\n"); + rc = -EINVAL; + goto out; + } if (data[(*packet_size)++] != 0x62) { printk(KERN_WARNING "Unrecognizable packet\n"); rc = -EINVAL; -- cgit v1.2.3 From f151cd2c54ddc7714e2f740681350476cda03a28 Mon Sep 17 00:00:00 2001 From: Ramon de Carvalho Valle Date: Tue, 28 Jul 2009 13:58:22 -0500 Subject: eCryptfs: parse_tag_3_packet check tag 3 packet encrypted key size The parse_tag_3_packet function does not check if the tag 3 packet contains an encrypted key size larger than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES.
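The missing validation boils down to a bounds check before attacker-supplied key material is copied into a fixed-size buffer. A minimal standalone sketch of the pattern (hypothetical names and sizes, not the kernel code; the actual fix is the keystore.c hunk below):

#include <stddef.h>
#include <string.h>

#define MAX_KEY_BYTES 512	/* stands in for ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES */

struct auth_tok_sketch {
	unsigned char key[MAX_KEY_BYTES];
	size_t key_size;
};

/* body_size is read straight from the packet, so it is attacker-controlled */
static int parse_key_sketch(struct auth_tok_sketch *tok,
			    const unsigned char *body, size_t body_size,
			    size_t header_bytes)
{
	if (body_size < header_bytes)
		return -1;			/* malformed packet */
	tok->key_size = body_size - header_bytes;
	if (tok->key_size > sizeof(tok->key))
		return -1;			/* the check the patch adds */
	memcpy(tok->key, body + header_bytes, tok->key_size);
	return 0;
}

Without the size check, a crafted body_size makes the memcpy() run past the end of tok->key.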
Signed-off-by: Ramon de Carvalho Valle [tyhicks@linux.vnet.ibm.com: Added printk newline and changed goto to out_free] Signed-off-by: Tyler Hicks Cc: stable@kernel.org (2.6.27 and 30) Signed-off-by: Linus Torvalds --- fs/ecryptfs/keystore.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'fs') diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index 5414253d4c9..259525c9abb 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -1303,6 +1303,13 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat, } (*new_auth_tok)->session_key.encrypted_key_size = (body_size - (ECRYPTFS_SALT_SIZE + 5)); + if ((*new_auth_tok)->session_key.encrypted_key_size + > ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) { + printk(KERN_WARNING "Tag 3 packet contains key larger " + "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n"); + rc = -EINVAL; + goto out_free; + } if (unlikely(data[(*packet_size)++] != 0x04)) { printk(KERN_WARNING "Unknown version number [%d]\n", data[(*packet_size) - 1]); -- cgit v1.2.3 From dddac6a7b445de95515f64fdf82fe5dc36c02f26 Mon Sep 17 00:00:00 2001 From: Alan Jenkins Date: Wed, 29 Jul 2009 21:07:55 +0200 Subject: PM / Hibernate: Replace bdget call with simple atomic_inc of i_count Create bdgrab(). This function copies an existing reference to a block_device. It is safe to call from any context. Hibernation code wishes to copy a reference to the active swap device. Right now it calls bdget() under a spinlock, but this is wrong because bdget() can sleep. It doesn't need a full bdget() because we already hold a reference to active swap devices (and the spinlock protects against swapoff). Fixes http://bugzilla.kernel.org/show_bug.cgi?id=13827 Signed-off-by: Alan Jenkins Signed-off-by: Rafael J. Wysocki --- fs/block_dev.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index 3a6d4fb2a32..94dfda24c06 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -564,6 +564,16 @@ struct block_device *bdget(dev_t dev) EXPORT_SYMBOL(bdget); +/** + * bdgrab -- Grab a reference to an already referenced block device + * @bdev: Block device to grab a reference to. + */ +struct block_device *bdgrab(struct block_device *bdev) +{ + atomic_inc(&bdev->bd_inode->i_count); + return bdev; +} + long nr_blockdev_pages(void) { struct block_device *bdev; -- cgit v1.2.3 From 5c8053652328693d10551131432ef3573e77ed2d Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Wed, 29 Jul 2009 15:04:11 -0700 Subject: fs/ramfs/file-nommu.c needs include/linux/sched.h This file makes use of various macros defined in files like asm/current.h or asm-generic/resource.h. All these files can be included via sched.h. The building of the !MMU ARM kernel (with additional patches) fails without this change. 
Signed-off-by: Catalin Marinas Acked-by: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ramfs/file-nommu.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index ebb2c417912..11f0c06316d 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -20,6 +20,7 @@ #include <linux/ramfs.h> #include <linux/pagevec.h> #include <linux/mman.h> +#include <linux/sched.h> #include <asm/uaccess.h> #include "internal.h" -- cgit v1.2.3 From 5bd9052d79daa4c8beb45436c408b6de672adb82 Mon Sep 17 00:00:00 2001 From: Steve French Date: Thu, 30 Jul 2009 02:26:14 +0000 Subject: [CIFS] Updates fs/cifs/CHANGES Signed-off-by: Steve French --- fs/cifs/CHANGES | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs') diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index 92888aa9074..651cefde385 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES @@ -1,3 +1,9 @@ +Version 1.60 +------------- +Fix memory leak in reconnect. Fix oops in DFS mount error path. +Set s_maxbytes to smaller (the max that vfs can handle) so that +sendfile will now work over cifs mounts again. + Version 1.59 ------------ Client uses server inode numbers (which are persistent) rather than -- cgit v1.2.3 From 2163b1e616c41c286f5ab79912671cd4bf52057c Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Thu, 25 Jun 2009 16:30:26 +0100 Subject: GFS2: Shrink the shrinker This patch removes some of the special cases that the shrinker was trying to deal with. As a result we leave fewer items on the list and none at all which cannot be demoted. This makes the list scanning more efficient and solves some issues seen with large numbers of inodes. Signed-off-by: Steven Whitehouse --- fs/gfs2/glock.c | 23 +++++------------------ 1 file changed, 5 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 297421c0427..fdb796c4f94 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1300,7 +1300,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) struct gfs2_glock *gl; int may_demote; int nr_skipped = 0; - int got_ref = 0; LIST_HEAD(skipped); if (nr == 0) @@ -1318,7 +1317,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) /* Test for being demotable */ if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { gfs2_glock_hold(gl); - got_ref = 1; spin_unlock(&lru_lock); spin_lock(&gl->gl_spin); may_demote = demote_ok(gl); @@ -1327,25 +1325,14 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) if (may_demote) { handle_callback(gl, LM_ST_UNLOCKED, 0); nr--; - if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) - gfs2_glock_put(gl); - got_ref = 0; } + if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) + gfs2_glock_put(gl); spin_lock(&lru_lock); - if (may_demote) - continue; - } - if (list_empty(&gl->gl_lru) && - (atomic_read(&gl->gl_ref) <= (2 + got_ref))) { - nr_skipped++; - list_add(&gl->gl_lru, &skipped); - } - if (got_ref) { - spin_unlock(&lru_lock); - gfs2_glock_put(gl); - spin_lock(&lru_lock); - got_ref = 0; + continue; } + nr_skipped++; + list_add(&gl->gl_lru, &skipped); } list_splice(&skipped, &lru_list); atomic_add(nr_skipped, &lru_count); -- cgit v1.2.3 From 1946f70ab5e4eb8b54a8eaaedba2293a3750ab7e Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Thu, 25 Jun 2009 15:09:51 -0500 Subject: GFS2: keep statfs info in sync on grows GFS2 wasn't syncing its statfs info on grows. This causes a problem when you grow the filesystem on multiple nodes.
GFS2 would calculate the new space based on the resource groups (which are always current), and then assume that the filesystem had grown from the existing statfs size. If you grew the filesystem on two different nodes in a short time, the second node wouldn't see the statfs size change from the first node, and would assume that it was grown by a larger amount than it was. When all these changes were synced out, the total filesystem size would be incorrect (the first grow would be counted twice). This patch makes GFS2 read in the statfs changes from disk before a grow, and write them out after the grow, while the master statfs inode is locked. Signed-off-by: Benjamin Marzinski Signed-off-by: Steven Whitehouse --- fs/gfs2/aops.c | 39 +++++++++++++++++++++++++++++++++++++++ fs/gfs2/super.c | 39 +++++++++++++++++++++++++-------------- fs/gfs2/super.h | 4 ++++ 3 files changed, 68 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 03ebb439ace..7ebae9a4ecc 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -624,6 +624,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, { struct gfs2_inode *ip = GFS2_I(mapping->host); struct gfs2_sbd *sdp = GFS2_SB(mapping->host); + struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); unsigned int data_blocks = 0, ind_blocks = 0, rblocks; int alloc_required; int error = 0; @@ -637,6 +638,14 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, error = gfs2_glock_nq(&ip->i_gh); if (unlikely(error)) goto out_uninit; + if (&ip->i_inode == sdp->sd_rindex) { + error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, + GL_NOCACHE, &m_ip->i_gh); + if (unlikely(error)) { + gfs2_glock_dq(&ip->i_gh); + goto out_uninit; + } + } error = gfs2_write_alloc_required(ip, pos, len, &alloc_required); if (error) @@ -667,6 +676,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, rblocks += data_blocks ? data_blocks : 1; if (ind_blocks || data_blocks) rblocks += RES_STATFS + RES_QUOTA; + if (&ip->i_inode == sdp->sd_rindex) + rblocks += 2 * RES_STATFS; error = gfs2_trans_begin(sdp, rblocks, PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); @@ -712,6 +723,10 @@ out_alloc_put: gfs2_alloc_put(ip); } out_unlock: + if (&ip->i_inode == sdp->sd_rindex) { + gfs2_glock_dq(&m_ip->i_gh); + gfs2_holder_uninit(&m_ip->i_gh); + } gfs2_glock_dq(&ip->i_gh); out_uninit: gfs2_holder_uninit(&ip->i_gh); @@ -725,14 +740,21 @@ out_uninit: static void adjust_fs_space(struct inode *inode) { struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; + struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); + struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; + struct buffer_head *m_bh, *l_bh; u64 fs_total, new_free; /* Total up the file system space, according to the latest rindex.
*/ fs_total = gfs2_ri_total(sdp); + if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0) + return; spin_lock(&sdp->sd_statfs_spin); + gfs2_statfs_change_in(m_sc, m_bh->b_data + + sizeof(struct gfs2_dinode)); if (fs_total > (m_sc->sc_total + l_sc->sc_total)) new_free = fs_total - (m_sc->sc_total + l_sc->sc_total); else @@ -741,6 +763,13 @@ static void adjust_fs_space(struct inode *inode) fs_warn(sdp, "File system extended by %llu blocks.\n", (unsigned long long)new_free); gfs2_statfs_change(sdp, new_free, new_free, 0); + + if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0) + goto out; + update_statfs(sdp, m_bh, l_bh); + brelse(l_bh); +out: + brelse(m_bh); } /** @@ -763,6 +792,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh, { struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); + struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); u64 to = pos + copied; void *kaddr; unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode); @@ -794,6 +824,10 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh, brelse(dibh); gfs2_trans_end(sdp); + if (inode == sdp->sd_rindex) { + gfs2_glock_dq(&m_ip->i_gh); + gfs2_holder_uninit(&m_ip->i_gh); + } gfs2_glock_dq(&ip->i_gh); gfs2_holder_uninit(&ip->i_gh); return copied; @@ -823,6 +857,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping, struct inode *inode = page->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); + struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); struct buffer_head *dibh; struct gfs2_alloc *al = ip->i_alloc; unsigned int from = pos & (PAGE_CACHE_SIZE - 1); @@ -865,6 +900,10 @@ failed: gfs2_quota_unlock(ip); gfs2_alloc_put(ip); } + if (inode == sdp->sd_rindex) { + gfs2_glock_dq(&m_ip->i_gh); + gfs2_holder_uninit(&m_ip->i_gh); + } gfs2_glock_dq(&ip->i_gh); gfs2_holder_uninit(&ip->i_gh); return ret; diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 0a680133647..552e321cee5 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -353,7 +353,7 @@ fail: return error; } -static void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf) +void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf) { const struct gfs2_statfs_change *str = buf; @@ -441,6 +441,29 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free, brelse(l_bh); } +void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, + struct buffer_head *l_bh) +{ + struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); + struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); + struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; + struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; + + gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1); + + spin_lock(&sdp->sd_statfs_spin); + m_sc->sc_total += l_sc->sc_total; + m_sc->sc_free += l_sc->sc_free; + m_sc->sc_dinodes += l_sc->sc_dinodes; + memset(l_sc, 0, sizeof(struct gfs2_statfs_change)); + memset(l_bh->b_data + sizeof(struct gfs2_dinode), + 0, sizeof(struct gfs2_statfs_change)); + spin_unlock(&sdp->sd_statfs_spin); + + gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1); + gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); +} + int gfs2_statfs_sync(struct gfs2_sbd *sdp) { struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); @@ -477,19 +500,7 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp) if (error) goto out_bh2; - gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1); - - spin_lock(&sdp->sd_statfs_spin); - m_sc->sc_total 
+= l_sc->sc_total; - m_sc->sc_free += l_sc->sc_free; - m_sc->sc_dinodes += l_sc->sc_dinodes; - memset(l_sc, 0, sizeof(struct gfs2_statfs_change)); - memset(l_bh->b_data + sizeof(struct gfs2_dinode), - 0, sizeof(struct gfs2_statfs_change)); - spin_unlock(&sdp->sd_statfs_spin); - - gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1); - gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); + update_statfs(sdp, m_bh, l_bh); gfs2_trans_end(sdp); diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h index b56413e3e40..22e0417ed99 100644 --- a/fs/gfs2/super.h +++ b/fs/gfs2/super.h @@ -40,6 +40,10 @@ extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp); extern int gfs2_statfs_init(struct gfs2_sbd *sdp); extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free, s64 dinodes); +extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, + const void *buf); +extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, + struct buffer_head *l_bh); extern int gfs2_statfs_sync(struct gfs2_sbd *sdp); extern int gfs2_freeze_fs(struct gfs2_sbd *sdp); -- cgit v1.2.3 From a51b56fff3f04fc5aa66b21a2a6d693ee9862d66 Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Tue, 30 Jun 2009 13:51:11 -0500 Subject: GFS2: Fix panic in glock memory shrinker It is possible for gfs2_shrink_glock_memory() to check a glock for demotion that's in the process of being freed by gfs2_glock_put(). In this case, gfs2_shrink_glock_memory() will acquire a new reference to this glock, and then try to free the glock itself when it drops the reference. To solve this, gfs2_shrink_glock_memory() just needs to check if the glock is in the process of being freed, and if so skip it without ever unlocking the lru_lock. Signed-off-by: Benjamin Marzinski Acked-by: Bob Peterson Signed-off-by: Steven Whitehouse --- fs/gfs2/glock.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index fdb796c4f94..827136ee794 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1314,6 +1314,10 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) list_del_init(&gl->gl_lru); atomic_dec(&lru_count); + /* Check if glock is about to be freed */ + if (atomic_read(&gl->gl_ref) == 0) + continue; + /* Test for being demotable */ if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { gfs2_glock_hold(gl); -- cgit v1.2.3 From 1e19a19584b332eb92a573b66b7342fb97e67507 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Fri, 10 Jul 2009 21:13:38 +0100 Subject: GFS2: Don't try and dealloc own inode When searching for unlinked, but still allocated inodes during block allocation, avoid the block relating to the inode that is doing the allocation. This fixes a hang caused when an unlinked, but still open, inode tries to allocate some more blocks and lands up finding itself during the search for deallocatable inodes.
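Condensed, the change threads the allocating inode's own block number through the search as a value to skip; a paraphrase of the rgrp.c hunks below, where find_next_unlinked() is a hypothetical stand-in for the bitmap scan:

while (find_next_unlinked(rgd, &no_addr)) {	/* hypothetical iterator */
	if (no_addr == skip)	/* skip is the caller's ip->i_no_addr */
		continue;	/* never look up the inode doing the allocation */
	*last_unlinked = no_addr;
	return gfs2_inode_lookup(rgd->rd_sbd->sd_vfs, DT_UNKNOWN,
				 no_addr, -1, 1);
}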
Signed-off-by: Steven Whitehouse --- fs/gfs2/rgrp.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index daa4ae341a2..5e5074176da 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -961,7 +961,8 @@ static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al) * Returns: The inode, if one has been found */ -static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked) +static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, + u64 skip) { struct inode *inode; u32 goal = 0, block; @@ -985,6 +986,8 @@ static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked) goal++; if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked) continue; + if (no_addr == skip) + continue; *last_unlinked = no_addr; inode = gfs2_inode_lookup(rgd->rd_sbd->sd_vfs, DT_UNKNOWN, no_addr, -1, 1); @@ -1104,7 +1107,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked) if (try_rgrp_fit(rgd, al)) goto out; if (rgd->rd_flags & GFS2_RDF_CHECK) - inode = try_rgrp_unlink(rgd, last_unlinked); + inode = try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr); if (!rg_locked) gfs2_glock_dq_uninit(&al->al_rgd_gh); if (inode) @@ -1138,7 +1141,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked) if (try_rgrp_fit(rgd, al)) goto out; if (rgd->rd_flags & GFS2_RDF_CHECK) - inode = try_rgrp_unlink(rgd, last_unlinked); + inode = try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr); if (!rg_locked) gfs2_glock_dq_uninit(&al->al_rgd_gh); if (inode) -- cgit v1.2.3 From 8ff22a6f9bdaac87c0eeb1d56c736181f11b4221 Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Fri, 10 Jul 2009 18:04:24 -0500 Subject: GFS2: Don't put unlikely reclaim candidates on the reclaim list. GFS2 was placing far too many glocks on the reclaim list that were not good candidates for freeing up from cache. These locks would sit there and repeatedly get scanned to see if they could be reclaimed, wasting a lot of time when there was memory pressure. This fix does more checks on the locks to see if they are actually likely to be removable from cache. 
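The heart of the change is a stricter queueing test. Condensed from the glock.c hunk below, a glock is only scheduled for reclaim when it could actually be demoted and nothing beyond the hash table (plus, for inode glocks, the inode itself) still references it:

may_reclaim = demote_ok(gl) &&
	      (atomic_read(&gl->gl_ref) == 1 ||	/* only the hash table */
	       (gl->gl_name.ln_type == LM_TYPE_INODE &&
		atomic_read(&gl->gl_ref) <= 2));	/* ...or just the inode too */
if (list_empty(&gl->gl_lru) && may_reclaim) {
	list_add_tail(&gl->gl_lru, &lru_list);
	atomic_inc(&lru_count);
}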
Signed-off-by: Benjamin Marzinski Signed-off-by: Steven Whitehouse --- fs/gfs2/glock.c | 72 ++++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 46 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 827136ee794..f041a89e1ab 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -173,6 +173,26 @@ static void gfs2_glock_hold(struct gfs2_glock *gl) atomic_inc(&gl->gl_ref); } +/** + * demote_ok - Check to see if it's ok to unlock a glock + * @gl: the glock + * + * Returns: 1 if it's ok + */ + +static int demote_ok(const struct gfs2_glock *gl) +{ + const struct gfs2_glock_operations *glops = gl->gl_ops; + + if (gl->gl_state == LM_ST_UNLOCKED) + return 0; + if (!list_empty(&gl->gl_holders)) + return 0; + if (glops->go_demote_ok) + return glops->go_demote_ok(gl); + return 1; +} + /** * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list * @gl: the glock @@ -181,14 +201,34 @@ static void gfs2_glock_hold(struct gfs2_glock *gl) static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) { + int may_reclaim; + may_reclaim = (demote_ok(gl) && + (atomic_read(&gl->gl_ref) == 1 || + (gl->gl_name.ln_type == LM_TYPE_INODE && + atomic_read(&gl->gl_ref) <= 2))); spin_lock(&lru_lock); - if (list_empty(&gl->gl_lru) && gl->gl_state != LM_ST_UNLOCKED) { + if (list_empty(&gl->gl_lru) && may_reclaim) { list_add_tail(&gl->gl_lru, &lru_list); atomic_inc(&lru_count); } spin_unlock(&lru_lock); } +/** + * gfs2_glock_put_nolock() - Decrement reference count on glock + * @gl: The glock to put + * + * This function should only be used if the caller has its own reference + * to the glock, in addition to the one it is dropping. + */ + +static void gfs2_glock_put_nolock(struct gfs2_glock *gl) +{ + if (atomic_dec_and_test(&gl->gl_ref)) + GLOCK_BUG_ON(gl, 1); + gfs2_glock_schedule_for_reclaim(gl); +} + /** * gfs2_glock_put() - Decrement reference count on glock * @gl: The glock to put @@ -214,9 +254,9 @@ int gfs2_glock_put(struct gfs2_glock *gl) rv = 1; goto out; } - /* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */ - if (atomic_read(&gl->gl_ref) == 2) - gfs2_glock_schedule_for_reclaim(gl); + spin_lock(&gl->gl_spin); + gfs2_glock_schedule_for_reclaim(gl); + spin_unlock(&gl->gl_spin); write_unlock(gl_lock_addr(gl->gl_hash)); out: return rv; @@ -398,7 +438,7 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state) if (held2) gfs2_glock_hold(gl); else - gfs2_glock_put(gl); + gfs2_glock_put_nolock(gl); } gl->gl_state = new_state; @@ -633,7 +673,7 @@ out: out_sched: gfs2_glock_hold(gl); if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) - gfs2_glock_put(gl); + gfs2_glock_put_nolock(gl); out_unlock: clear_bit(GLF_LOCK, &gl->gl_flags); goto out; @@ -1274,26 +1314,6 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret) gfs2_glock_put(gl); } -/** - * demote_ok - Check to see if it's ok to unlock a glock - * @gl: the glock - * - * Returns: 1 if it's ok - */ - -static int demote_ok(const struct gfs2_glock *gl) -{ - const struct gfs2_glock_operations *glops = gl->gl_ops; - - if (gl->gl_state == LM_ST_UNLOCKED) - return 0; - if (!list_empty(&gl->gl_holders)) - return 0; - if (glops->go_demote_ok) - return glops->go_demote_ok(gl); - return 1; -} - static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) { -- cgit v1.2.3 From 6b94617024bd6810cde1d0d491202c30d5a38d91 Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Fri, 10 Jul 2009 18:13:26 -0500 Subject: GFS2: Fix incorrent statfs consistency 
check Since both linked and unlinked inodes are counted by rgd->rd_dinodes, It makes no sense to count them with the used data blocks (first check that I changed), it makes sense to count them with the linked inodes (second check), and it makes no sense to care if there are more unlinked inodes than linked ones. This fixes these errors. Signed-off-by: Benjamin Marzinski Signed-off-by: Steven Whitehouse --- fs/gfs2/rgrp.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 5e5074176da..fba795798d3 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -285,27 +285,19 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd) } tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes; - if (count[1] + count[2] != tmp) { + if (count[1] != tmp) { if (gfs2_consist_rgrpd(rgd)) fs_err(sdp, "used data mismatch: %u != %u\n", count[1], tmp); return; } - if (count[3] != rgd->rd_dinodes) { + if (count[2] + count[3] != rgd->rd_dinodes) { if (gfs2_consist_rgrpd(rgd)) fs_err(sdp, "used metadata mismatch: %u != %u\n", - count[3], rgd->rd_dinodes); + count[2] + count[3], rgd->rd_dinodes); return; } - - if (count[2] > count[3]) { - if (gfs2_consist_rgrpd(rgd)) - fs_err(sdp, "unlinked inodes > inodes: %u\n", - count[2]); - return; - } - } static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block) -- cgit v1.2.3 From b94a170e96dc416828af9d350ae2e34b70ae7347 Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Thu, 23 Jul 2009 18:52:34 -0500 Subject: GFS2: remove dcache entries for remote deleted inodes When a file is deleted from a gfs2 filesystem on one node, a dcache entry for it may still exist on other nodes in the cluster. If this happens, gfs2 will be unable to free this file on disk. Because of this, it's possible to have a gfs2 filesystem with no files on it and no free space. With this patch, when a node receives a callback notifying it that the file is being deleted on another node, it schedules a new workqueue thread to remove the file's dcache entry. Signed-off-by: Benjamin Marzinski Signed-off-by: Steven Whitehouse --- fs/gfs2/glock.c | 43 ++++++++++++++++++++++++++++++++++++++----- fs/gfs2/glock.h | 3 +++ fs/gfs2/glops.c | 21 +++++++++++++++++++++ fs/gfs2/incore.h | 2 ++ fs/gfs2/super.c | 1 + 5 files changed, 65 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index f041a89e1ab..8b674b1f3a5 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -63,6 +63,7 @@ static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int static DECLARE_RWSEM(gfs2_umount_flush_sem); static struct dentry *gfs2_root; static struct workqueue_struct *glock_workqueue; +struct workqueue_struct *gfs2_delete_workqueue; static LIST_HEAD(lru_list); static atomic_t lru_count = ATOMIC_INIT(0); static DEFINE_SPINLOCK(lru_lock); @@ -167,7 +168,7 @@ static void glock_free(struct gfs2_glock *gl) * */ -static void gfs2_glock_hold(struct gfs2_glock *gl) +void gfs2_glock_hold(struct gfs2_glock *gl) { GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0); atomic_inc(&gl->gl_ref); @@ -222,7 +223,7 @@ static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) * to the glock, in addition to the one it is dropping. 
*/ -static void gfs2_glock_put_nolock(struct gfs2_glock *gl) +void gfs2_glock_put_nolock(struct gfs2_glock *gl) { if (atomic_dec_and_test(&gl->gl_ref)) GLOCK_BUG_ON(gl, 1); @@ -679,6 +680,29 @@ out_unlock: goto out; } +static void delete_work_func(struct work_struct *work) +{ + struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); + struct gfs2_sbd *sdp = gl->gl_sbd; + struct gfs2_inode *ip = NULL; + struct inode *inode; + u64 no_addr = 0; + + spin_lock(&gl->gl_spin); + ip = (struct gfs2_inode *)gl->gl_object; + if (ip) + no_addr = ip->i_no_addr; + spin_unlock(&gl->gl_spin); + if (ip) { + inode = gfs2_ilookup(sdp->sd_vfs, no_addr); + if (inode) { + d_prune_aliases(inode); + iput(inode); + } + } + gfs2_glock_put(gl); +} + static void glock_work_func(struct work_struct *work) { unsigned long delay = 0; @@ -757,6 +781,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, gl->gl_sbd = sdp; gl->gl_aspace = NULL; INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); + INIT_WORK(&gl->gl_delete, delete_work_func); /* If this glock protects actual on-disk data or metadata blocks, create a VFS inode to manage the pages/buffers holding them. */ @@ -898,6 +923,8 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state, gl->gl_demote_state != state) { gl->gl_demote_state = LM_ST_UNLOCKED; } + if (gl->gl_ops->go_callback) + gl->gl_ops->go_callback(gl); trace_gfs2_demote_rq(gl); } @@ -1344,14 +1371,14 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) spin_unlock(&lru_lock); spin_lock(&gl->gl_spin); may_demote = demote_ok(gl); - spin_unlock(&gl->gl_spin); - clear_bit(GLF_LOCK, &gl->gl_flags); if (may_demote) { handle_callback(gl, LM_ST_UNLOCKED, 0); nr--; } if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) - gfs2_glock_put(gl); + gfs2_glock_put_nolock(gl); + spin_unlock(&gl->gl_spin); + clear_bit(GLF_LOCK, &gl->gl_flags); spin_lock(&lru_lock); continue; } @@ -1738,6 +1765,11 @@ int __init gfs2_glock_init(void) glock_workqueue = create_workqueue("glock_workqueue"); if (IS_ERR(glock_workqueue)) return PTR_ERR(glock_workqueue); + gfs2_delete_workqueue = create_workqueue("delete_workqueue"); + if (IS_ERR(gfs2_delete_workqueue)) { + destroy_workqueue(glock_workqueue); + return PTR_ERR(gfs2_delete_workqueue); + } register_shrinker(&glock_shrinker); @@ -1748,6 +1780,7 @@ void gfs2_glock_exit(void) { unregister_shrinker(&glock_shrinker); destroy_workqueue(glock_workqueue); + destroy_workqueue(gfs2_delete_workqueue); } static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi) diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h index a602a28f6f0..c609894ec0d 100644 --- a/fs/gfs2/glock.h +++ b/fs/gfs2/glock.h @@ -143,6 +143,7 @@ struct lm_lockops { #define GLR_TRYFAILED 13 +extern struct workqueue_struct *gfs2_delete_workqueue; static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl) { struct gfs2_holder *gh; @@ -191,6 +192,8 @@ static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl) int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, const struct gfs2_glock_operations *glops, int create, struct gfs2_glock **glp); +void gfs2_glock_hold(struct gfs2_glock *gl); +void gfs2_glock_put_nolock(struct gfs2_glock *gl); int gfs2_glock_put(struct gfs2_glock *gl); void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags, struct gfs2_holder *gh); diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index d5e4ab155ca..6985eef06c3 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -323,6 +323,7 @@ static void 
trans_go_sync(struct gfs2_glock *gl) if (gl->gl_state != LM_ST_UNLOCKED && test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { + flush_workqueue(gfs2_delete_workqueue); gfs2_meta_syncfs(sdp); gfs2_log_shutdown(sdp); } @@ -372,6 +373,25 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl) return 0; } +/** + * iopen_go_callback - schedule the dcache entry for the inode to be deleted + * @gl: the glock + * + * gl_spin lock is held while calling this + */ +static void iopen_go_callback(struct gfs2_glock *gl) +{ + struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object; + + if (gl->gl_demote_state == LM_ST_UNLOCKED && + gl->gl_state == LM_ST_SHARED && + ip && test_bit(GIF_USER, &ip->i_flags)) { + gfs2_glock_hold(gl); + if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0) + gfs2_glock_put_nolock(gl); + } +} + const struct gfs2_glock_operations gfs2_meta_glops = { .go_type = LM_TYPE_META, }; @@ -406,6 +426,7 @@ const struct gfs2_glock_operations gfs2_trans_glops = { const struct gfs2_glock_operations gfs2_iopen_glops = { .go_type = LM_TYPE_IOPEN, + .go_callback = iopen_go_callback, }; const struct gfs2_glock_operations gfs2_flock_glops = { diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 225347fbff3..61801ada36f 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -159,6 +159,7 @@ struct gfs2_glock_operations { int (*go_lock) (struct gfs2_holder *gh); void (*go_unlock) (struct gfs2_holder *gh); int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl); + void (*go_callback) (struct gfs2_glock *gl); const int go_type; const unsigned long go_min_hold_time; }; @@ -228,6 +229,7 @@ struct gfs2_glock { struct list_head gl_ail_list; atomic_t gl_ail_count; struct delayed_work gl_work; + struct work_struct gl_delete; }; #define GFS2_MIN_LVB_SIZE 32 /* Min size of LVB that gfs2 supports */ diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 552e321cee5..f522bb01797 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -691,6 +691,7 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp) struct gfs2_holder t_gh; int error; + flush_workqueue(gfs2_delete_workqueue); gfs2_quota_sync(sdp); gfs2_statfs_sync(sdp); -- cgit v1.2.3 From 276e680d192a67d222fcea51af37b056feffb665 Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Thu, 30 Jul 2009 09:40:40 -0400 Subject: Btrfs: preserve commit_root for async caching The async block group caching code uses the commit_root pointer to get a stable version of the extent allocation tree for scanning. This copy of the tree root isn't going to change and it significantly reduces the complexity of the scanning code. During a commit, we have a loop where we update the extent allocation tree root. We need to loop because updating the root pointer in the tree of tree roots may allocate blocks which may change the extent allocation tree. Right now the commit_root pointer is changed inside this loop. It is more correct to change the commit_root pointer only after all the looping is done. 
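Schematically, the reordering looks like this (a condensed paraphrase of the transaction.c changes below; next_dirty_root() is a hypothetical helper for the list walk):

while (!list_empty(&fs_info->dirty_cowonly_roots)) {
	root = next_dirty_root(fs_info);	/* may include the extent root */
	update_cowonly_root(trans, root);	/* can re-dirty the extent tree */
}

/* only after the loop converges is the scanner's view switched */
down_write(&fs_info->extent_commit_sem);
switch_commit_root(fs_info->extent_root);
up_write(&fs_info->extent_commit_sem);

The write side of extent_commit_sem pairs with the read side taken by the async caching thread, so the commit_root it scans can no longer change underneath it.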
Signed-off-by: Yan Zheng Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 4 +--- fs/btrfs/disk-io.c | 2 +- fs/btrfs/extent-tree.c | 6 +++--- fs/btrfs/transaction.c | 12 +++++++++--- 4 files changed, 14 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 17ad92c29cf..38eeb6c49c8 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -827,6 +827,7 @@ struct btrfs_fs_info { struct mutex drop_mutex; struct mutex volume_mutex; struct mutex tree_reloc_mutex; + struct rw_semaphore extent_commit_sem; /* * this protects the ordered operations list only while we are @@ -961,9 +962,6 @@ struct btrfs_root { /* the node lock is held while changing the node pointer */ spinlock_t node_lock; - /* taken when updating the commit root */ - struct rw_semaphore commit_root_sem; - struct extent_buffer *commit_root; struct btrfs_root *log_root; struct btrfs_root *reloc_root; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 3a9b8875988..3cf4cfa575c 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -907,7 +907,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, spin_lock_init(&root->inode_lock); mutex_init(&root->objectid_mutex); mutex_init(&root->log_mutex); - init_rwsem(&root->commit_root_sem); init_waitqueue_head(&root->log_writer_wait); init_waitqueue_head(&root->log_commit_wait[0]); init_waitqueue_head(&root->log_commit_wait[1]); @@ -1624,6 +1623,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, mutex_init(&fs_info->cleaner_mutex); mutex_init(&fs_info->volume_mutex); mutex_init(&fs_info->tree_reloc_mutex); + init_rwsem(&fs_info->extent_commit_sem); btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); btrfs_init_free_cluster(&fs_info->data_alloc_cluster); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index fadf69a2764..2fe21fa7491 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -267,7 +267,7 @@ static int caching_kthread(void *data) last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); again: /* need to make sure the commit_root doesn't disappear */ - down_read(&fs_info->extent_root->commit_root_sem); + down_read(&fs_info->extent_commit_sem); /* * We don't want to deadlock with somebody trying to allocate a new @@ -304,7 +304,7 @@ again: if (need_resched()) { btrfs_release_path(fs_info->extent_root, path); - up_read(&fs_info->extent_root->commit_root_sem); + up_read(&fs_info->extent_commit_sem); cond_resched(); goto again; } @@ -345,7 +345,7 @@ next: err: btrfs_free_path(path); - up_read(&fs_info->extent_root->commit_root_sem); + up_read(&fs_info->extent_commit_sem); atomic_dec(&block_group->space_info->caching_threads); wake_up(&block_group->caching_q); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index e51d2bc532f..de48e4ec808 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -42,10 +42,8 @@ static noinline void put_transaction(struct btrfs_transaction *transaction) static noinline void switch_commit_root(struct btrfs_root *root) { - down_write(&root->commit_root_sem); free_extent_buffer(root->commit_root); root->commit_root = btrfs_root_node(root); - up_write(&root->commit_root_sem); } /* @@ -466,7 +464,10 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, ret = btrfs_write_dirty_block_groups(trans, root); BUG_ON(ret); } - switch_commit_root(root); + + if (root != root->fs_info->extent_root) + switch_commit_root(root); + return 0; } @@ -499,6 +500,11 @@ static noinline int commit_cowonly_roots(struct 
btrfs_trans_handle *trans, update_cowonly_root(trans, root); } + + down_write(&fs_info->extent_commit_sem); + switch_commit_root(fs_info->extent_root); + up_write(&fs_info->extent_commit_sem); + return 0; } -- cgit v1.2.3 From f36f3042eae238bdaefe7c79310afe573bfc3622 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 30 Jul 2009 10:04:48 -0400 Subject: Btrfs: be more polite in the async caching threads The semaphore used by the async caching threads can prevent a transaction commit, which can make the FS appear to stall. This releases the semaphore more often when a transaction commit is in progress. Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 5 +++-- fs/btrfs/transaction.c | 10 ++++++++++ fs/btrfs/transaction.h | 1 + 3 files changed, 14 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 2fe21fa7491..dc84daee6bc 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -302,10 +302,11 @@ again: else if (ret) break; - if (need_resched()) { + if (need_resched() || + btrfs_transaction_in_commit(fs_info)) { btrfs_release_path(fs_info->extent_root, path); up_read(&fs_info->extent_commit_sem); - cond_resched(); + schedule_timeout(1); goto again; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index de48e4ec808..cdbb5022da5 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -857,6 +857,16 @@ static void update_super_roots(struct btrfs_root *root) super->root_level = root_item->level; } +int btrfs_transaction_in_commit(struct btrfs_fs_info *info) +{ + int ret = 0; + spin_lock(&info->new_trans_lock); + if (info->running_transaction) + ret = info->running_transaction->in_commit; + spin_unlock(&info->new_trans_lock); + return ret; +} + int btrfs_commit_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root) { diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 961c3ee5a2e..663c6740491 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -107,4 +107,5 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, struct extent_io_tree *dirty_pages); +int btrfs_transaction_in_commit(struct btrfs_fs_info *info); #endif -- cgit v1.2.3 From 4bf17af0dbfe4cf20cb750e22e8e926273e7a7a4 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 14 Jul 2009 19:30:23 +0200 Subject: udf: Fix loading of VAT inode when drive wrongly reports number of recorded blocks The VAT inode is located in the last recorded block of the medium. When the drive erroneously reports the number of recorded blocks, we fail to load the VAT inode and thus fail to mount the medium. This patch makes the kernel try to read the VAT inode from the last block of the device if it is different from the last recorded block.
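Schematically the fallback reads as follows (a simplified sketch; read_vat_inode() is a hypothetical stand-in for the ino setup plus udf_iget() sequence in the super.c hunk below):

last = sbi->s_last_block;			/* what the drive reported */
vat = read_vat_inode(sb, last - map->s_partition_root);
if (!vat && last != device_blocks - 1)		/* report may be wrong */
	vat = read_vat_inode(sb, device_blocks - 1 - map->s_partition_root);
if (!vat)
	return 1;				/* still no VAT: give up */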
Signed-off-by: Jan Kara --- fs/udf/super.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'fs') diff --git a/fs/udf/super.c b/fs/udf/super.c index 6832135159b..9d1b8c2e6c4 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -1087,11 +1087,23 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index) struct udf_inode_info *vati; uint32_t pos; struct virtualAllocationTable20 *vat20; + sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits; /* VAT file entry is in the last recorded block */ ino.partitionReferenceNum = type1_index; ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root; sbi->s_vat_inode = udf_iget(sb, &ino); + if (!sbi->s_vat_inode && + sbi->s_last_block != blocks - 1) { + printk(KERN_NOTICE "UDF-fs: Failed to read VAT inode from the" + " last recorded block (%lu), retrying with the last " + "block of the device (%lu).\n", + (unsigned long)sbi->s_last_block, + (unsigned long)blocks - 1); + ino.partitionReferenceNum = type1_index; + ino.logicalBlockNum = blocks - 1 - map->s_partition_root; + sbi->s_vat_inode = udf_iget(sb, &ino); + } if (!sbi->s_vat_inode) return 1; -- cgit v1.2.3 From dee865656f2d8b866f8ac22c60d6363b914e9f12 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 22 Jul 2009 18:12:17 +0200 Subject: quota: Silence lockdep on quota_on Commit d01730d74d2b0155da50d44555001706294014f7 didn't completely fix the problem since we still take dqio_mutex and i_mutex in the wrong order. Move taking of i_mutex further down (luckily it's needed only for updating inode flags) below where dqio_mutex is taken. Tested-by: Valdis Kletnieks Signed-off-by: Jan Kara --- fs/quota/dquot.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 70f36c043d6..38f7bd559f3 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2043,7 +2043,6 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, invalidate_bdev(sb->s_bdev); } mutex_lock(&dqopt->dqonoff_mutex); - mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); if (sb_has_quota_loaded(sb, type)) { error = -EBUSY; goto out_lock; @@ -2054,9 +2053,11 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, * possible) Also nobody should write to the file - we use * special IO operations which ignore the immutable bit. 
*/ down_write(&dqopt->dqptr_sem); + mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA); inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; + mutex_unlock(&inode->i_mutex); up_write(&dqopt->dqptr_sem); sb->dq_op->drop(inode); } @@ -2080,7 +2081,6 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, goto out_file_init; } mutex_unlock(&dqopt->dqio_mutex); - mutex_unlock(&inode->i_mutex); spin_lock(&dq_state_lock); dqopt->flags |= dquot_state_flag(flags, type); spin_unlock(&dq_state_lock); @@ -2096,13 +2096,14 @@ out_file_init: out_lock: if (oldflags != -1) { down_write(&dqopt->dqptr_sem); + mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); /* Set the flags back (in the case of accidental quotaon() * on a wrong file we don't want to mess up the flags) */ inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE); inode->i_flags |= oldflags; + mutex_unlock(&inode->i_mutex); up_write(&dqopt->dqptr_sem); } - mutex_unlock(&inode->i_mutex); mutex_unlock(&dqopt->dqonoff_mutex); out_fmt: put_quota_format(fmt); -- cgit v1.2.3 From 97db39a1f6f69e906e98118392400de5217aa33a Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Sun, 26 Jul 2009 21:52:01 -0500 Subject: xfs: reduce bmv_count in xfs_vn_fiemap commit 6321e3ed2acf3ee9643cdd403e1c88605d7944ba caused the full bmv_count's worth of getbmapx structures to get allocated; telling it to do MAXEXTNUM was a bit insane, resulting in ENOMEM every time. Chop it down to something reasonable, the number of slots in the caller's input buffer. If this is too large the caller may get ENOMEM but the reason should not be a mystery, and they can try again with something smaller. We add 1 to the value because in the normal getbmap world, bmv_count includes the header and xfs_getbmap does: nex = bmv->bmv_count - 1; if (nex <= 0) return XFS_ERROR(EINVAL); Signed-off-by: Eric Sandeen Reviewed-by: Olaf Weber Reviewed-by: Christoph Hellwig Signed-off-by: Felix Blyakher --- fs/xfs/linux-2.6/xfs_iops.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 58973bb4603..8070b34cc28 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -680,8 +680,8 @@ xfs_vn_fiemap( else bm.bmv_length = BTOBB(length); - /* our formatter will tell xfs_getbmap when to stop. */ - bm.bmv_count = MAXEXTNUM; + /* We add one because in getbmap world count includes the header */ + bm.bmv_count = fieinfo->fi_extents_max + 1; bm.bmv_iflags = BMV_IF_PREALLOC; if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) bm.bmv_iflags |= BMV_IF_ATTRFORK; -- cgit v1.2.3 From c8a4051c3731b6db224482218cfd535ab9393ff8 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Fri, 31 Jul 2009 00:02:17 -0500 Subject: xfs: bump up nr_to_write in xfs_vm_writepage VM calculation for nr_to_write seems off. Bump it way up, this gets simple streaming writes zippy again. To be reviewed again after Jens' writeback changes. 
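The change itself is a single multiplier in ->writepage; condensed from the xfs_aops.c hunk below, with an illustrative figure:

/* if the VM asked for N pages of writeback, attempt 4*N instead,
 * e.g. a request for 1024 pages becomes 4096 */
wbc->nr_to_write *= 4;

Since ->writepage is invoked repeatedly during a writeback pass, each call inflates the remaining quota, which keeps large streaming writes from being throttled into small chunks.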
Signed-off-by: Christoph Hellwig Signed-off-by: Eric Sandeen Cc: Chris Mason Reviewed-by: Felix Blyakher Signed-off-by: Felix Blyakher --- fs/xfs/linux-2.6/xfs_aops.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 7ec89fc05b2..aecf2519db7 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -1268,6 +1268,14 @@ xfs_vm_writepage( if (!page_has_buffers(page)) create_empty_buffers(page, 1 << inode->i_blkbits, 0); + + /* + * VM calculation for nr_to_write seems off. Bump it way + * up, this gets simple streaming writes zippy again. + * To be reviewed again after Jens' writeback changes. + */ + wbc->nr_to_write *= 4; + /* * Convert delayed allocate, unwritten or unmapped space * to real space and flush out to disk. -- cgit v1.2.3 From 6606bb97e146a387932efee263745b7240a11193 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 31 Jul 2009 11:03:58 -0400 Subject: Btrfs: fix btrfs_remove_from_free_space corner case Yan Zheng hit a problem where we tried to remove some free space but failed because we couldn't find the free space entry. This is because the free space was held within a bitmap that had a starting offset well before the actual offset of the free space, and there were free space extents that were in the same range as that offset, so tree_search_offset returned with NULL because we couldn't find a free space extent that had that offset. This is fixed by making sure that if we fail to find the entry, we re-search again with bitmap_only set to 1 and do an offset_to_bitmap so we can get the appropriate bitmap. A similar problem happens in btrfs_alloc_from_bitmap for the clustering code, but that is not as bad since we will just go and redo our cluster allocation. Also this adds some debugging checks to make sure that the free space we are trying to remove from the bitmap is in fact there. This can probably go away after a while, but since this code is only used by the tree-logging stuff it would be nice to run with it for a while to make sure there are no problems. Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 73 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 64 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index af99b78b288..5edcee3a617 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -414,11 +414,29 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro u64 *offset, u64 *bytes) { u64 end; + u64 search_start, search_bytes; + int ret; again: end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1; + /* + * XXX - this can go away after a few releases. + * + * since the only user of btrfs_remove_free_space is the tree logging + * stuff, and the only way to test that is under crash conditions, we + * want to have this debug stuff here just in case somethings not + * working. Search the bitmap for the space we are trying to use to + * make sure its actually there. If its not there then we need to stop + * because something has gone wrong. 
+ */ + search_start = *offset; + search_bytes = *bytes; + ret = search_bitmap(block_group, bitmap_info, &search_start, + &search_bytes); + BUG_ON(ret < 0 || search_start != *offset); + if (*offset > bitmap_info->offset && *offset + *bytes > end) { bitmap_clear_bits(block_group, bitmap_info, *offset, end - *offset + 1); @@ -430,6 +448,7 @@ again: } if (*bytes) { + struct rb_node *next = rb_next(&bitmap_info->offset_index); if (!bitmap_info->bytes) { unlink_free_space(block_group, bitmap_info); kfree(bitmap_info->bitmap); @@ -438,16 +457,36 @@ again: recalculate_thresholds(block_group); } - bitmap_info = tree_search_offset(block_group, - offset_to_bitmap(block_group, - *offset), - 1, 0); - if (!bitmap_info) + /* + * no entry after this bitmap, but we still have bytes to + * remove, so something has gone wrong. + */ + if (!next) return -EINVAL; + bitmap_info = rb_entry(next, struct btrfs_free_space, + offset_index); + + /* + * if the next entry isn't a bitmap we need to return to let the + * extent stuff do its work. + */ if (!bitmap_info->bitmap) return -EAGAIN; + /* + * Ok the next item is a bitmap, but it may not actually hold + * the information for the rest of this free space stuff, so + * look for it, and if we don't find it return so we can try + * everything over again. + */ + search_start = *offset; + search_bytes = *bytes; + ret = search_bitmap(block_group, bitmap_info, &search_start, + &search_bytes); + if (ret < 0 || search_start != *offset) + return -EAGAIN; + goto again; } else if (!bitmap_info->bytes) { unlink_free_space(block_group, bitmap_info); @@ -644,8 +683,17 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, again: info = tree_search_offset(block_group, offset, 0, 0); if (!info) { - WARN_ON(1); - goto out_lock; + /* + * oops didn't find an extent that matched the space we wanted + * to remove, look for a bitmap instead + */ + info = tree_search_offset(block_group, + offset_to_bitmap(block_group, offset), + 1, 0); + if (!info) { + WARN_ON(1); + goto out_lock; + } } if (info->bytes < bytes && rb_next(&info->offset_index)) { @@ -957,8 +1005,15 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, if (cluster->block_group != block_group) goto out; - entry = tree_search_offset(block_group, search_start, 0, 0); - + /* + * search_start is the beginning of the bitmap, but at some point it may + * be a good idea to point to the actual start of the free area in the + * bitmap, so do the offset_to_bitmap trick anyway, and set bitmap_only + * to 1 to make sure we get the bitmap entry + */ + entry = tree_search_offset(block_group, + offset_to_bitmap(block_group, search_start), + 1, 0); if (!entry || !entry->bitmap) goto out; -- cgit v1.2.3 From 013f1b12f4fc479f697acae2f31bad220162cd03 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 31 Jul 2009 14:57:55 -0400 Subject: Btrfs: make sure the async caching thread advances the key The async caching thread can end up looping forever if a given search puts it at the last key in a leaf. It will end up calling btrfs_next_leaf and then checking if it needs to politely drop the read semaphore. Most of the time this looping isn't noticed because it is able to make progress the next time around. But, during log replay, we wait on the async caching thread to finish, and the async thread is waiting on the commit, and no progress is really made. The fix used here is to copy the key out of the next leaf, that way our search lands there properly. 
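Condensed from the extent-tree.c hunk below, the yield path now records its resume point before dropping the path and the semaphore:

leaf = path->nodes[0];
if (btrfs_header_nritems(leaf) == 0)
	break;				/* empty leaf: nothing to resume at */
btrfs_item_key_to_cpu(leaf, &key, 0);	/* remember where to restart */
btrfs_release_path(fs_info->extent_root, path);
up_read(&fs_info->extent_commit_sem);
schedule_timeout(1);			/* let the commit make progress */
goto again;				/* the re-search lands at 'key' */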
Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index dc84daee6bc..72a2b9c28e9 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -265,10 +265,6 @@ static int caching_kthread(void *data) atomic_inc(&block_group->space_info->caching_threads); last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); -again: - /* need to make sure the commit_root doesn't disappear */ - down_read(&fs_info->extent_commit_sem); - /* * We don't want to deadlock with somebody trying to allocate a new * extent for the extent root while also trying to search the extent @@ -282,6 +278,10 @@ again: key.objectid = last; key.offset = 0; btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); +again: + /* need to make sure the commit_root doesn't disappear */ + down_read(&fs_info->extent_commit_sem); + ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0); if (ret < 0) goto err; @@ -304,6 +304,19 @@ again: if (need_resched() || btrfs_transaction_in_commit(fs_info)) { + leaf = path->nodes[0]; + + /* this shouldn't happen, but if the + * leaf is empty just move on. + */ + if (btrfs_header_nritems(leaf) == 0) + break; + /* + * we need to copy the key out so that + * we are sure the next search advances + * us forward in the btree. + */ + btrfs_item_key_to_cpu(leaf, &key, 0); btrfs_release_path(fs_info->extent_root, path); up_read(&fs_info->extent_commit_sem); schedule_timeout(1); goto again; -- cgit v1.2.3 From a97778457f22181e8c38c4cd7d7e528378738a98 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Tue, 28 Jul 2009 17:55:29 +0900 Subject: nilfs2: fix oops due to inconsistent state in page with discrete b-tree nodes Andrea Gelmini gave me a report that a kernel oops hit on a nilfs filesystem with a 1KB block size when doing rsync. This turned out to be caused by an inconsistency of dirty state between a page and its buffers storing b-tree node blocks. If the page had multiple buffers split over multiple logs, and if the logs were written out at the same time, a dirty flag remained in the page even though every dirty flag in the buffers was cleared. This fixes the failure by properly dropping the dirty flag for pages holding multiple discrete b-tree node buffers. Reported-by: Andrea Gelmini Signed-off-by: Ryusuke Konishi Tested-by: Andrea Gelmini Cc: stable@kernel.org --- fs/nilfs2/segment.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 8b5e4778cf2..51ff3d0a4ee 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1859,12 +1859,26 @@ static void nilfs_end_page_io(struct page *page, int err) if (!page) return; - if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) + if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) { /* * For b-tree node pages, this function may be called twice * or more because they might be split in a segment. */ + if (PageDirty(page)) { + /* + * For pages holding split b-tree node buffers, dirty + * flag on the buffers may be cleared discretely. + * In that case, the page is once redirtied for + * remaining buffers, and it must be cancelled if + * all the buffers get cleaned later.
+ */ + lock_page(page); + if (nilfs_page_buffers_clean(page)) + __nilfs_clear_page_dirty(page); + unlock_page(page); + } return; + } __nilfs_end_page_io(page, err); } -- cgit v1.2.3 From 9b9d6b2434fe942895c341b9a982f158529788ec Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 31 Jul 2009 06:56:09 -0400 Subject: cifs: reinstate original behavior when uid=/gid= options are specified This patch fixes the regression reported here: http://bugzilla.kernel.org/show_bug.cgi?id=13861 commit 4ae1507f6d266d0cc3dd36e474d83aad70fec9e4 changed the default behavior when the uid= or gid= option was specified for a mount. The existing behavior was to always clobber the ownership information provided by the server when these options were specified. The above commit changed this behavior so that these options simply provided defaults when the server did not provide this information (unless "forceuid" or "forcegid" were specified) This patch reverts this change so that the default behavior is restored. It also adds "noforceuid" and "noforcegid" options to make it so that ownership information from the server is preserved, even when the mount has uid= or gid= options specified. It also adds a couple of printk notices that pop up when forceuid or forcegid options are specified without a uid= or gid= option. Reported-by: Tom Chiverton Reviewed-by: Shirish Pargaonkar Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 42 ++++++++++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index f2486889b7b..1f3345d7fa7 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -803,6 +803,10 @@ cifs_parse_mount_options(char *options, const char *devname, char *data; unsigned int temp_len, i, j; char separator[2]; + short int override_uid = -1; + short int override_gid = -1; + bool uid_specified = false; + bool gid_specified = false; separator[0] = ','; separator[1] = 0; @@ -1093,18 +1097,20 @@ cifs_parse_mount_options(char *options, const char *devname, "too long.\n"); return 1; } - } else if (strnicmp(data, "uid", 3) == 0) { - if (value && *value) - vol->linux_uid = - simple_strtoul(value, &value, 0); - } else if (strnicmp(data, "forceuid", 8) == 0) { - vol->override_uid = 1; - } else if (strnicmp(data, "gid", 3) == 0) { - if (value && *value) - vol->linux_gid = - simple_strtoul(value, &value, 0); - } else if (strnicmp(data, "forcegid", 8) == 0) { - vol->override_gid = 1; + } else if (!strnicmp(data, "uid", 3) && value && *value) { + vol->linux_uid = simple_strtoul(value, &value, 0); + uid_specified = true; + } else if (!strnicmp(data, "forceuid", 8)) { + override_uid = 1; + } else if (!strnicmp(data, "noforceuid", 10)) { + override_uid = 0; + } else if (!strnicmp(data, "gid", 3) && value && *value) { + vol->linux_gid = simple_strtoul(value, &value, 0); + gid_specified = true; + } else if (!strnicmp(data, "forcegid", 8)) { + override_gid = 1; + } else if (!strnicmp(data, "noforcegid", 10)) { + override_gid = 0; } else if (strnicmp(data, "file_mode", 4) == 0) { if (value && *value) { vol->file_mode = @@ -1355,6 +1361,18 @@ cifs_parse_mount_options(char *options, const char *devname, if (vol->UNCip == NULL) vol->UNCip = &vol->UNC[2]; + if (uid_specified) + vol->override_uid = override_uid; + else if (override_uid == 1) + printk(KERN_NOTICE "CIFS: ignoring forceuid mount option " + "specified with no uid= option.\n"); + + if (gid_specified) + vol->override_gid = override_gid; + else if 
(override_gid == 1) + printk(KERN_NOTICE "CIFS: ignoring forcegid mount option " + "specified with no gid= option.\n"); + return 0; } -- cgit v1.2.3 From 01a261e09a21e0ba342d3907a79cf5c78ee3f37a Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 2 Aug 2009 17:45:55 +0900 Subject: nilfs2: fix missing unlock in error path of nilfs_mdt_write_page This adds a missing unlock of nilfs->ns_writer_mutex in nilfs_mdt_write_page() function. Signed-off-by: Ryusuke Konishi --- fs/nilfs2/mdt.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 3d3ddb3f517..2dfd47714ae 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -412,8 +412,10 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc) return 0; /* Do not request flush for shadow page cache */ if (!sb) { writer = nilfs_get_writer(NILFS_MDT(inode)->mi_nilfs); - if (!writer) + if (!writer) { + nilfs_put_writer(NILFS_MDT(inode)->mi_nilfs); return -EROFS; + } sb = writer->s_super; } -- cgit v1.2.3 From 4486d6ede16b362f89b29845af6fe1a26ae78a54 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 3 Aug 2009 12:45:10 -0400 Subject: cifs: show noforceuid/noforcegid mount options (try #2) Since forceuid is the default, we now need to show when it's disabled. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 44f30504b82..84b75253b05 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -376,10 +376,14 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m) seq_printf(s, ",uid=%d", cifs_sb->mnt_uid); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) seq_printf(s, ",forceuid"); + else + seq_printf(s, ",noforceuid"); seq_printf(s, ",gid=%d", cifs_sb->mnt_gid); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) seq_printf(s, ",forcegid"); + else + seq_printf(s, ",noforcegid"); cifs_show_address(s, tcon->ses->server); -- cgit v1.2.3 From 24e2fb615fd6b624c320cec9ea9d91a75dad902e Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Sun, 2 Aug 2009 13:00:18 +0200 Subject: cifs: Read buffer overflow Check whether index is within bounds before testing the element. Acked-by: Jeff Layton Signed-off-by: Roel Kluin Signed-off-by: Steve French --- fs/cifs/cifs_unicode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 60e3c4253de..714a542cbaf 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -44,7 +44,7 @@ cifs_ucs2_bytes(const __le16 *from, int maxbytes, int maxwords = maxbytes / 2; char tmp[NLS_MAX_CHARSET_SIZE]; - for (i = 0; from[i] && i < maxwords; i++) { + for (i = 0; i < maxwords && from[i]; i++) { charlen = codepage->uni2char(le16_to_cpu(from[i]), tmp, NLS_MAX_CHARSET_SIZE); if (charlen > 0) -- cgit v1.2.3 From d098564f3b2b5d555e51bca765a6a9e0dda8f2cd Mon Sep 17 00:00:00 2001 From: Steve French Date: Tue, 4 Aug 2009 03:53:28 +0000 Subject: [CIFS] Update readme to reflect forceuid mount parms Signed-off-by: Steve French --- fs/cifs/CHANGES | 3 ++- fs/cifs/README | 25 ++++++++++++------------- 2 files changed, 14 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index 651cefde385..e85b1e4389e 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES @@ -2,7 +2,8 @@ Version 1.60 ------------- Fix memory leak in reconnect. Fix oops in DFS mount error path. 
Set s_maxbytes to smaller (the max that vfs can handle) so that -sendfile will now work over cifs mounts again. +sendfile will now work over cifs mounts again. Add noforcegid +and noforceuid mount parameters. Version 1.59 ------------ diff --git a/fs/cifs/README b/fs/cifs/README index ad92921dbde..79c1a93400b 100644 --- a/fs/cifs/README +++ b/fs/cifs/README @@ -262,11 +262,11 @@ A partial list of the supported mount options follows: mount. domain Set the SMB/CIFS workgroup name prepended to the username during CIFS session establishment - forceuid Set the default uid for inodes based on the uid - passed in. For mounts to servers + forceuid Set the default uid for inodes to the uid + passed in on mount. For mounts to servers which do support the CIFS Unix extensions, such as a properly configured Samba server, the server provides - the uid, gid and mode so this parameter should not be + the uid, gid and mode so this parameter should not be specified unless the server and clients uid and gid numbering differ. If the server and client are in the same domain (e.g. running winbind or nss_ldap) and @@ -278,11 +278,7 @@ A partial list of the supported mount options follows: of existing files will be the uid (gid) of the person who executed the mount (root, except when mount.cifs is configured setuid for user mounts) unless the "uid=" - (gid) mount option is specified. For the uid (gid) of newly - created files and directories, ie files created since - the last mount of the server share, the expected uid - (gid) is cached as long as the inode remains in - memory on the client. Also note that permission + (gid) mount option is specified. Also note that permission checks (authorization checks) on accesses to a file occur at the server, but there are cases in which an administrator may want to restrict at the client as well. For those @@ -290,12 +286,15 @@ A partial list of the supported mount options follows: (such as Windows), permissions can also be checked at the client, and a crude form of client side permission checking can be enabled by specifying file_mode and dir_mode on - the client. Note that the mount.cifs helper must be - at version 1.10 or higher to support specifying the uid - (or gid) in non-numeric form. - forcegid (similar to above but for the groupid instead of uid) + the client. (default) + forcegid (similar to above but for the groupid instead of uid) (default) + noforceuid Fill in file owner information (uid) by requesting it from + the server if possible. With this option, the value given in + the uid= option (on mount) will only be used if the server + can not support returning uids on inodes. + noforcegid (similar to above but for the group owner, gid, instead of uid) uid Set the default uid for inodes, and indicate to the - cifs kernel driver which local user mounted . If the server + cifs kernel driver which local user mounted. If the server supports the unix extensions the default uid is not used to fill in the owner fields of inodes (files) unless the "forceuid" parameter is specified. -- cgit v1.2.3 From 57ca7deb062abf56168d15f000c16e25f88a9cf3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20Grafstr=C3=B6m?= Date: Tue, 4 Aug 2009 13:11:47 +0200 Subject: jffs2: Fix return value from jffs2_do_readpage_nolock() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes "kernel BUG at fs/jffs2/file.c:251!". 
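A minimal sketch of the contract this one-line fix restores (the wrapper function below is illustrative, not from the patch; only jffs2_do_readpage_nolock() and the page helpers are real): a ->write_begin implementation that reads the page in must propagate any read error to its caller, so the write is aborted before ->write_end ever reaches its BUG_ON(!PageUptodate(pg)) check.

#include <linux/fs.h>
#include <linux/pagemap.h>

/* jffs2_do_readpage_nolock() is static in fs/jffs2/file.c, so this
 * hypothetical wrapper would live in the same file. */
static int jffs2_do_readpage_nolock(struct inode *inode, struct page *pg);

static int example_write_begin_read_step(struct inode *inode, struct page *pg)
{
	/*
	 * With the fix, a CRC failure deep inside jffs2_read_dnode()
	 * surfaces here as a negative errno instead of a silent 0.
	 */
	int ret = jffs2_do_readpage_nolock(inode, pg);

	if (ret) {
		unlock_page(pg);
		page_cache_release(pg);
		return ret;	/* caller aborts; write_end is never reached */
	}
	return 0;
}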
The following pseudocode illustrates the scenario that triggers it: jffs2_write_begin { jffs2_do_readpage_nolock { jffs2_read_inode_range { jffs2_read_dnode { Data CRC 33c102e9 != calculated CRC 0ef77e7b for node at 005d42e4 return -EIO; } } ClearPageUptodate(pg); return 0; } } jffs2_write_end { BUG_ON(!PageUptodate(pg)); } Signed-off-by: Anders Grafström Signed-off-by: David Woodhouse --- fs/jffs2/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 5edc2bf2058..23c94753986 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -99,7 +99,7 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) kunmap(pg); D2(printk(KERN_DEBUG "readpage finished\n")); - return 0; + return ret; } int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg) -- cgit v1.2.3 From 54e346215e4fe2ca8c94c54e546cc61902060510 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 7 Aug 2009 14:38:25 -0300 Subject: vfs: fix inode_init_always calling convention Currently inode_init_always calls into ->destroy_inode if the additional initialization fails. That's not only counter-intuitive because inode_init_always did not allocate the inode structure, but in the case of XFS it's actively harmful as ->destroy_inode might delete the inode from a radix-tree that has never been added. This in turn might end up deleting the inode for the same inum that has been instantiated by another process and cause lots of subtle problems. Also in the case of re-initializing a reclaimable inode in XFS it would free an inode we still want to keep alive. Signed-off-by: Christoph Hellwig Reviewed-by: Eric Sandeen --- fs/inode.c | 30 +++++++++++++++++------------- fs/xfs/xfs_iget.c | 17 +++++------------ 2 files changed, 22 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index 901bad1e5f1..af2c05235cc 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -120,12 +120,11 @@ static void wake_up_inode(struct inode *inode) * These are initializations that need to be done on every inode * allocation as the fields are not initialised by slab allocation.
*/ -struct inode *inode_init_always(struct super_block *sb, struct inode *inode) +int inode_init_always(struct super_block *sb, struct inode *inode) { static const struct address_space_operations empty_aops; static struct inode_operations empty_iops; static const struct file_operations empty_fops; - struct address_space *const mapping = &inode->i_data; inode->i_sb = sb; @@ -152,7 +151,7 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode) inode->dirtied_when = 0; if (security_inode_alloc(inode)) - goto out_free_inode; + goto out; /* allocate and initialize an i_integrity */ if (ima_inode_alloc(inode)) @@ -198,16 +197,12 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode) inode->i_fsnotify_mask = 0; #endif - return inode; + return 0; out_free_security: security_inode_free(inode); -out_free_inode: - if (inode->i_sb->s_op->destroy_inode) - inode->i_sb->s_op->destroy_inode(inode); - else - kmem_cache_free(inode_cachep, (inode)); - return NULL; +out: + return -ENOMEM; } EXPORT_SYMBOL(inode_init_always); @@ -220,9 +215,18 @@ static struct inode *alloc_inode(struct super_block *sb) else inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL); - if (inode) - return inode_init_always(sb, inode); - return NULL; + if (!inode) + return NULL; + + if (unlikely(inode_init_always(sb, inode))) { + if (inode->i_sb->s_op->destroy_inode) + inode->i_sb->s_op->destroy_inode(inode); + else + kmem_cache_free(inode_cachep, inode); + return NULL; + } + + return inode; } void destroy_inode(struct inode *inode) diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 5fcec6f020a..719c85b155f 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -64,6 +64,10 @@ xfs_inode_alloc( ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP); if (!ip) return NULL; + if (inode_init_always(mp->m_super, VFS_I(ip))) { + kmem_zone_free(xfs_inode_zone, ip); + return NULL; + } ASSERT(atomic_read(&ip->i_iocount) == 0); ASSERT(atomic_read(&ip->i_pincount) == 0); @@ -105,17 +109,6 @@ xfs_inode_alloc( #ifdef XFS_DIR2_TRACE ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS); #endif - /* - * Now initialise the VFS inode. We do this after the xfs_inode - * initialisation as internal failures will result in ->destroy_inode - * being called and that will pass down through the reclaim path and - * free the XFS inode. This path requires the XFS inode to already be - * initialised. Hence if this call fails, the xfs_inode has already - * been freed and we should not reference it at all in the error - * handling. - */ - if (!inode_init_always(mp->m_super, VFS_I(ip))) - return NULL; /* prevent anyone from using this yet */ VFS_I(ip)->i_state = I_NEW|I_LOCK; @@ -167,7 +160,7 @@ xfs_iget_cache_hit( * errors cleanly, then tag it so it can be set up correctly * later. */ - if (!inode_init_always(mp->m_super, VFS_I(ip))) { + if (inode_init_always(mp->m_super, VFS_I(ip))) { error = ENOMEM; goto out_error; } -- cgit v1.2.3 From 2e00c97e2c1d2ffc9e26252ca26b237678b0b772 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 7 Aug 2009 14:38:29 -0300 Subject: vfs: add __destroy_inode When we want to tear down an inode that lost the race to get added to the inode cache in XFS, we must not call into ->destroy_inode because that would delete the inode that won the race from the inode cache radix tree. This patch provides the __destroy_inode helper needed to fix this; the actual fix will be in the next patch. As XFS was the only reason destroy_inode was exported, we shift the export to the new __destroy_inode.
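To make the intended use of the new helper concrete, here is a hedged sketch of the pattern it enables (struct example_inode, EXAMPLE_I and example_inode_cachep are hypothetical names, not from the patch): a filesystem that loses an inode-cache insertion race can undo the state set up by inode_init_always() and free its own allocation without going through ->destroy_inode, which could disturb the cache entry of the inode that won the race.

#include <linux/fs.h>
#include <linux/slab.h>

/* hypothetical filesystem-private inode container */
struct example_inode {
	/* ... fs-private fields ... */
	struct inode vfs_inode;
};
static struct kmem_cache *example_inode_cachep;

#define EXAMPLE_I(inode) container_of(inode, struct example_inode, vfs_inode)

static void example_free_losing_inode(struct inode *inode)
{
	/* undo what inode_init_always() set up (IMA, ACLs, etc.) */
	__destroy_inode(inode);

	/*
	 * Free only this filesystem's own allocation; calling
	 * ->destroy_inode here could also tear down the cache state
	 * of the inode that won the race.
	 */
	kmem_cache_free(example_inode_cachep, EXAMPLE_I(inode));
}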
Signed-off-by: Christoph Hellwig Reviewed-by: Eric Sandeen --- fs/inode.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index af2c05235cc..ae7b67e4866 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -229,7 +229,7 @@ static struct inode *alloc_inode(struct super_block *sb) return inode; } -void destroy_inode(struct inode *inode) +void __destroy_inode(struct inode *inode) { BUG_ON(inode_has_buffers(inode)); ima_inode_free(inode); @@ -241,13 +241,17 @@ void destroy_inode(struct inode *inode) if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED) posix_acl_release(inode->i_default_acl); #endif +} +EXPORT_SYMBOL(__destroy_inode); + +void destroy_inode(struct inode *inode) +{ + __destroy_inode(inode); if (inode->i_sb->s_op->destroy_inode) inode->i_sb->s_op->destroy_inode(inode); else kmem_cache_free(inode_cachep, (inode)); } -EXPORT_SYMBOL(destroy_inode); - /* * These are initializations that only need to be done -- cgit v1.2.3 From b36ec0428a06fcbdb67d61e9e664154e5dd9a8c7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 7 Aug 2009 14:38:34 -0300 Subject: xfs: fix freeing of inodes not yet added to the inode cache When freeing an inode that lost the race getting added to the inode cache, we must not call into ->destroy_inode, because that would delete the inode that won the race from the inode cache radix tree. This patch splits a new xfs_inode_free helper out of xfs_ireclaim and uses that plus __destroy_inode to make sure we really only free the memory allocated for the inode that lost the race, and do not mess with the inode cache state. Signed-off-by: Christoph Hellwig Reviewed-by: Eric Sandeen Reported-by: Alex Samad Reported-by: Andrew Randrianasulu Reported-by: Stephane Reported-by: Tommy Reported-by: Miah Gregory Reported-by: Gabriel Barazer Reported-by: Leandro Lucarella Reported-by: Daniel Burr Reported-by: Nickolay Reported-by: Michael Guntsche Reported-by: Dan Carley Reported-by: Michael Ole Olsen Reported-by: Michael Weissenbacher Reported-by: Martin Spott Reported-by: Christian Kujau Tested-by: Michael Guntsche Tested-by: Dan Carley Tested-by: Christian Kujau --- fs/xfs/xfs_iget.c | 125 +++++++++++++++++++++++++++++------------------------ fs/xfs/xfs_inode.h | 17 -------- 2 files changed, 68 insertions(+), 74 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 719c85b155f..34ec86923f7 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -116,6 +116,71 @@ xfs_inode_alloc( return ip; } +STATIC void +xfs_inode_free( + struct xfs_inode *ip) +{ + switch (ip->i_d.di_mode & S_IFMT) { + case S_IFREG: + case S_IFDIR: + case S_IFLNK: + xfs_idestroy_fork(ip, XFS_DATA_FORK); + break; + } + + if (ip->i_afp) + xfs_idestroy_fork(ip, XFS_ATTR_FORK); + +#ifdef XFS_INODE_TRACE + ktrace_free(ip->i_trace); +#endif +#ifdef XFS_BMAP_TRACE + ktrace_free(ip->i_xtrace); +#endif +#ifdef XFS_BTREE_TRACE + ktrace_free(ip->i_btrace); +#endif +#ifdef XFS_RW_TRACE + ktrace_free(ip->i_rwtrace); +#endif +#ifdef XFS_ILOCK_TRACE + ktrace_free(ip->i_lock_trace); +#endif +#ifdef XFS_DIR2_TRACE + ktrace_free(ip->i_dir_trace); +#endif + + if (ip->i_itemp) { + /* + * Only if we are shutting down the fs will we see an + * inode still in the AIL. If it is there, we should remove + * it to prevent a use-after-free from occurring.
+ */ + xfs_log_item_t *lip = &ip->i_itemp->ili_item; + struct xfs_ail *ailp = lip->li_ailp; + + ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) || + XFS_FORCED_SHUTDOWN(ip->i_mount)); + if (lip->li_flags & XFS_LI_IN_AIL) { + spin_lock(&ailp->xa_lock); + if (lip->li_flags & XFS_LI_IN_AIL) + xfs_trans_ail_delete(ailp, lip); + else + spin_unlock(&ailp->xa_lock); + } + xfs_inode_item_destroy(ip); + ip->i_itemp = NULL; + } + + /* asserts to verify all state is correct here */ + ASSERT(atomic_read(&ip->i_iocount) == 0); + ASSERT(atomic_read(&ip->i_pincount) == 0); + ASSERT(!spin_is_locked(&ip->i_flags_lock)); + ASSERT(completion_done(&ip->i_flush)); + + kmem_zone_free(xfs_inode_zone, ip); +} + /* * Check the validity of the inode we just found it the cache */ @@ -292,7 +357,8 @@ out_preload_end: if (lock_flags) xfs_iunlock(ip, lock_flags); out_destroy: - xfs_destroy_inode(ip); + __destroy_inode(VFS_I(ip)); + xfs_inode_free(ip); return error; } @@ -497,62 +563,7 @@ xfs_ireclaim( xfs_qm_dqdetach(ip); xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); - switch (ip->i_d.di_mode & S_IFMT) { - case S_IFREG: - case S_IFDIR: - case S_IFLNK: - xfs_idestroy_fork(ip, XFS_DATA_FORK); - break; - } - - if (ip->i_afp) - xfs_idestroy_fork(ip, XFS_ATTR_FORK); - -#ifdef XFS_INODE_TRACE - ktrace_free(ip->i_trace); -#endif -#ifdef XFS_BMAP_TRACE - ktrace_free(ip->i_xtrace); -#endif -#ifdef XFS_BTREE_TRACE - ktrace_free(ip->i_btrace); -#endif -#ifdef XFS_RW_TRACE - ktrace_free(ip->i_rwtrace); -#endif -#ifdef XFS_ILOCK_TRACE - ktrace_free(ip->i_lock_trace); -#endif -#ifdef XFS_DIR2_TRACE - ktrace_free(ip->i_dir_trace); -#endif - if (ip->i_itemp) { - /* - * Only if we are shutting down the fs will we see an - * inode still in the AIL. If it is there, we should remove - * it to prevent a use-after-free from occurring. - */ - xfs_log_item_t *lip = &ip->i_itemp->ili_item; - struct xfs_ail *ailp = lip->li_ailp; - - ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) || - XFS_FORCED_SHUTDOWN(ip->i_mount)); - if (lip->li_flags & XFS_LI_IN_AIL) { - spin_lock(&ailp->xa_lock); - if (lip->li_flags & XFS_LI_IN_AIL) - xfs_trans_ail_delete(ailp, lip); - else - spin_unlock(&ailp->xa_lock); - } - xfs_inode_item_destroy(ip); - ip->i_itemp = NULL; - } - /* asserts to verify all state is correct here */ - ASSERT(atomic_read(&ip->i_iocount) == 0); - ASSERT(atomic_read(&ip->i_pincount) == 0); - ASSERT(!spin_is_locked(&ip->i_flags_lock)); - ASSERT(completion_done(&ip->i_flush)); - kmem_zone_free(xfs_inode_zone, ip); + xfs_inode_free(ip); } /* diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 1804f866a71..65f24a3cc99 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -309,23 +309,6 @@ static inline struct inode *VFS_I(struct xfs_inode *ip) return &ip->i_vnode; } -/* - * Get rid of a partially initialized inode. - * - * We have to go through destroy_inode to make sure allocations - * from init_inode_always like the security data are undone. - * - * We mark the inode bad so that it takes the short cut in - * the reclaim path instead of going through the flush path - * which doesn't make sense for an inode that has never seen the - * light of day. 
- */ -static inline void xfs_destroy_inode(struct xfs_inode *ip) -{ - make_bad_inode(VFS_I(ip)); - return destroy_inode(VFS_I(ip)); -} - /* * i_flags helper functions */ -- cgit v1.2.3 From 69130c7cf96ea853dc5be599dd6a4b98907d39cc Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Thu, 6 Aug 2009 15:07:37 -0700 Subject: compat_ioctl: hook up compat handler for FIEMAP ioctl The FIEMAP_IOC_FIEMAP mapping ioctl was missing a 32-bit compat handler, which means that 32-bit userspace on 64-bit kernels cannot use this ioctl command. The structure is nicely aligned, padded, and sized, so it is just this simple. Tested w/ 32-bit ioctl tester (from Josef) on a 64-bit kernel on ext4. Signed-off-by: Eric Sandeen Cc: Cc: Mark Lord Cc: Arnd Bergmann Cc: Josef Bacik Cc: Jan Kara Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/compat_ioctl.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index f28f070a60f..f91fd51b32e 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -1905,6 +1905,7 @@ COMPATIBLE_IOCTL(FIONCLEX) COMPATIBLE_IOCTL(FIOASYNC) COMPATIBLE_IOCTL(FIONBIO) COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */ +COMPATIBLE_IOCTL(FS_IOC_FIEMAP) /* 0x00 */ COMPATIBLE_IOCTL(FIBMAP) COMPATIBLE_IOCTL(FIGETBSZ) -- cgit v1.2.3 From 2d8dd38a5aa0cc2490bbad9b75e77fa154e1d145 Mon Sep 17 00:00:00 2001 From: OGAWA Hirofumi Date: Thu, 6 Aug 2009 15:07:39 -0700 Subject: vfs: mnt_want_write_file(): fix special file handling I suspect that mnt_want_write_file() may be making a wrong assumption: it assumes ->mnt_writers was already incremented at open time whenever (file->f_mode & FMODE_WRITE) is set. But if the file is a special_file(), that is not the case. Signed-off-by: OGAWA Hirofumi Acked-by: Dave Hansen Cc: Al Viro Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/namespace.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namespace.c b/fs/namespace.c index 277c28a63ea..7230787d18b 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -316,7 +316,8 @@ EXPORT_SYMBOL_GPL(mnt_clone_write); */ int mnt_want_write_file(struct file *file) { - if (!(file->f_mode & FMODE_WRITE)) + struct inode *inode = file->f_dentry->d_inode; + if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode)) return mnt_want_write(file->f_path.mnt); else return mnt_clone_write(file->f_path.mnt); -- cgit v1.2.3 From 3440625d78711bee41a84cf29c3d8c579b522666 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 6 Aug 2009 15:09:34 -0700 Subject: flat: fix uninitialized ptr with shared libs The new credentials code broke load_flat_shared_library() as it now uses an uninitialized cred pointer.
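A hedged sketch of the allocate/abort pairing the fix introduces (the wrapper function is illustrative; prepare_exec_creds(), prepare_binprm() and abort_creds() are the calls the patch uses): a linux_binprm built outside the normal execve path must have its ->cred populated before prepare_binprm() dereferences it, and because this bprm is never committed to a real exec, the credentials are discarded with abort_creds() when loading finishes.

#include <linux/binfmts.h>
#include <linux/cred.h>

static int example_load_shared_lib(struct linux_binprm *bprm)
{
	int res = -ENOMEM;

	bprm->cred = prepare_exec_creds();	/* was left uninitialized */
	if (!bprm->cred)
		return res;

	res = prepare_binprm(bprm);		/* safe: bprm->cred is valid */
	/* ... load_flat_file() would run here on success ... */

	abort_creds(bprm->cred);		/* never committed, so discard */
	return res;
}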
Reported-by: Bernd Schmidt Tested-by: Bernd Schmidt Cc: Mike Frysinger Cc: David Howells Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/binfmt_flat.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 697f6b5f131..e92f229e3c6 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -828,15 +828,22 @@ static int load_flat_shared_library(int id, struct lib_info *libs) if (IS_ERR(bprm.file)) return res; + bprm.cred = prepare_exec_creds(); + res = -ENOMEM; + if (!bprm.cred) + goto out; + res = prepare_binprm(&bprm); if (res <= (unsigned long)-4096) res = load_flat_file(&bprm, libs, id, NULL); - if (bprm.file) { - allow_write_access(bprm.file); - fput(bprm.file); - bprm.file = NULL; - } + + abort_creds(bprm.cred); + +out: + allow_write_access(bprm.file); + fput(bprm.file); + return(res); } -- cgit v1.2.3 From 4baf8c9201e88546918cbfa61ea8062c38bc1644 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Fri, 7 Aug 2009 13:47:08 -0400 Subject: Btrfs: remove superfluous NULL pointer check in btrfs_rename() This takes care of the following entry from Dan's list: fs/btrfs/inode.c +4788 btrfs_rename(36) warning: variable derefenced before check 'old_inode' Reported-by: Dan Carpenter Cc: Jonathan Corbet Cc: Eugene Teo Cc: Julia Lawall Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3ea827ddf0f..04b53b5ebe5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4806,8 +4806,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, * and the replacement file is large. Start IO on it now so * we don't add too much work to the end of the transaction */ - if (new_inode && old_inode && S_ISREG(old_inode->i_mode) && - new_inode->i_size && + if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size && old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) filemap_flush(old_inode->i_mapping); -- cgit v1.2.3 From 60f2e8f8a07331097a57ec4abcdc680405579377 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Fri, 7 Aug 2009 13:51:33 -0400 Subject: Btrfs: correct error-handling zlib error handling find_zlib_workspace returns an ERR_PTR value in an error case instead of NULL. A simplified version of the semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @match exists@ expression x, E; statement S1, S2; @@ x = find_zlib_workspace(...) ... when != x = E ( * if (x == NULL || ...) S1 else S2 | * if (x == NULL && ...) 
S1 else S2 ) // Signed-off-by: Julia Lawall Signed-off-by: Chris Mason --- fs/btrfs/zlib.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index ecfbce836d3..3e2b90eaa23 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -208,7 +208,7 @@ int btrfs_zlib_compress_pages(struct address_space *mapping, *total_in = 0; workspace = find_zlib_workspace(); - if (!workspace) + if (IS_ERR(workspace)) return -1; if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) { @@ -366,7 +366,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in, char *kaddr; workspace = find_zlib_workspace(); - if (!workspace) + if (IS_ERR(workspace)) return -ENOMEM; data_in = kmap(pages_in[page_in_index]); @@ -547,7 +547,7 @@ int btrfs_zlib_decompress(unsigned char *data_in, return -ENOMEM; workspace = find_zlib_workspace(); - if (!workspace) + if (IS_ERR(workspace)) return -ENOMEM; workspace->inf_strm.next_in = data_in; -- cgit v1.2.3 From ceab36edd3d3ad3ffd01d41d6d1e05ac1ff8357e Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Fri, 7 Aug 2009 13:51:33 -0400 Subject: Btrfs: fix balancing oops when invalidate_inode_pages2 returns EBUSY invalidate_inode_pages2_range may occasionally return -EBUSY, which results in an oops. This patch fixes the issue by moving invalidate_inode_pages2_range into a loop and calling it repeatedly until the return value is not -EBUSY. The EBUSY return is temporary, and can happen when the btrfs release page function is unable to release a page because the EXTENT_LOCK bit is set. Signed-off-by: Yan Zheng Signed-off-by: Chris Mason --- fs/btrfs/relocation.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index e71264d1c2c..c04f7f21260 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2553,8 +2553,13 @@ int relocate_inode_pages(struct inode *inode, u64 start, u64 len) last_index = (start + len - 1) >> PAGE_CACHE_SHIFT; /* make sure the dirty trick played by the caller work */ - ret = invalidate_inode_pages2_range(inode->i_mapping, - first_index, last_index); + while (1) { + ret = invalidate_inode_pages2_range(inode->i_mapping, + first_index, last_index); + if (ret != -EBUSY) + break; + schedule_timeout(HZ/10); + } if (ret) goto out_unlock; -- cgit v1.2.3
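The retry loop in that last hunk is a reusable pattern; a hedged restatement as a standalone helper (the wrapper name is illustrative, the calls are the ones the patch uses): treat -EBUSY as a transient condition, yield briefly, and let any other return value, whether success or a hard error, end the loop.

#include <linux/fs.h>
#include <linux/sched.h>

static int example_invalidate_retry(struct address_space *mapping,
				    pgoff_t first_index, pgoff_t last_index)
{
	int ret;

	while (1) {
		ret = invalidate_inode_pages2_range(mapping, first_index,
						    last_index);
		if (ret != -EBUSY)
			break;			/* done, or a real error */
		schedule_timeout(HZ / 10);	/* yield before retrying,
						 * as the patch does */
	}
	return ret;
}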