Diffstat (limited to 'fs/inode.c')
-rw-r--r--  fs/inode.c  37
1 files changed, 16 insertions, 21 deletions
diff --git a/fs/inode.c b/fs/inode.c
index 6d695037a0a..5bc97507eea 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -282,6 +282,13 @@ static void dispose_list(struct list_head *head)
 		if (inode->i_data.nrpages)
 			truncate_inode_pages(&inode->i_data, 0);
 		clear_inode(inode);
+
+		spin_lock(&inode_lock);
+		hlist_del_init(&inode->i_hash);
+		list_del_init(&inode->i_sb_list);
+		spin_unlock(&inode_lock);
+
+		wake_up_inode(inode);
 		destroy_inode(inode);
 		nr_disposed++;
 	}
@@ -317,8 +324,6 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
 		inode = list_entry(tmp, struct inode, i_sb_list);
 		invalidate_inode_buffers(inode);
 		if (!atomic_read(&inode->i_count)) {
-			hlist_del_init(&inode->i_hash);
-			list_del(&inode->i_sb_list);
 			list_move(&inode->i_list, dispose);
 			inode->i_state |= I_FREEING;
 			count++;
@@ -439,8 +444,6 @@ static void prune_icache(int nr_to_scan)
 			if (!can_unuse(inode))
 				continue;
 		}
-		hlist_del_init(&inode->i_hash);
-		list_del_init(&inode->i_sb_list);
 		list_move(&inode->i_list, &freeable);
 		inode->i_state |= I_FREEING;
 		nr_pruned++;
@@ -1244,29 +1247,21 @@ int inode_wait(void *word)
 }
 
 /*
- * If we try to find an inode in the inode hash while it is being deleted, we
- * have to wait until the filesystem completes its deletion before reporting
- * that it isn't found. This is because iget will immediately call
- * ->read_inode, and we want to be sure that evidence of the deletion is found
- * by ->read_inode.
+ * If we try to find an inode in the inode hash while it is being
+ * deleted, we have to wait until the filesystem completes its
+ * deletion before reporting that it isn't found. This function waits
+ * until the deletion _might_ have completed. Callers are responsible
+ * to recheck inode state.
+ *
+ * It doesn't matter if I_LOCK is not set initially, a call to
+ * wake_up_inode() after removing from the hash list will DTRT.
+ *
  * This is called with inode_lock held.
  */
 static void __wait_on_freeing_inode(struct inode *inode)
 {
 	wait_queue_head_t *wq;
 	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);
-
-	/*
-	 * I_FREEING and I_CLEAR are cleared in process context under
-	 * inode_lock, so we have to give the tasks who would clear them
-	 * a chance to run and acquire inode_lock.
-	 */
-	if (!(inode->i_state & I_LOCK)) {
-		spin_unlock(&inode_lock);
-		yield();
-		spin_lock(&inode_lock);
-		return;
-	}
 	wq = bit_waitqueue(&inode->i_state, __I_LOCK);
 	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
 	spin_unlock(&inode_lock);