path: root/fs/inode.c
author		Ingo Molnar <mingo@elte.hu>	2009-01-02 22:41:36 +0100
committer	Ingo Molnar <mingo@elte.hu>	2009-01-02 22:41:36 +0100
commit		923a789b49c7269a0245d5af6afe486188d940df (patch)
tree		c3f168427372e64f7467a794f313416da5086ba0 /fs/inode.c
parent		103ceffb9501531f6931df6aebc11a05189201f0 (diff)
parent		b840d79631c882786925303c2b0f4fefc31845ed (diff)
Merge branch 'linus' into x86/cleanups
Conflicts: arch/x86/kernel/reboot.c
Diffstat (limited to 'fs/inode.c')
-rw-r--r--	fs/inode.c	268
1 files changed, 185 insertions, 83 deletions
diff --git a/fs/inode.c b/fs/inode.c
index 0487ddba139..7de1cda9248 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -108,84 +108,100 @@ static void wake_up_inode(struct inode *inode)
wake_up_bit(&inode->i_state, __I_LOCK);
}
-static struct inode *alloc_inode(struct super_block *sb)
+/**
+ * inode_init_always - perform inode structure initialisation
+ * @sb - superblock inode belongs to.
+ * @inode - inode to initialise
+ *
+ * These are initializations that need to be done on every inode
+ * allocation as the fields are not initialised by slab allocation.
+ */
+struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
{
static const struct address_space_operations empty_aops;
static struct inode_operations empty_iops;
static const struct file_operations empty_fops;
- struct inode *inode;
- if (sb->s_op->alloc_inode)
- inode = sb->s_op->alloc_inode(sb);
- else
- inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
-
- if (inode) {
- struct address_space * const mapping = &inode->i_data;
-
- inode->i_sb = sb;
- inode->i_blkbits = sb->s_blocksize_bits;
- inode->i_flags = 0;
- atomic_set(&inode->i_count, 1);
- inode->i_op = &empty_iops;
- inode->i_fop = &empty_fops;
- inode->i_nlink = 1;
- atomic_set(&inode->i_writecount, 0);
- inode->i_size = 0;
- inode->i_blocks = 0;
- inode->i_bytes = 0;
- inode->i_generation = 0;
+ struct address_space * const mapping = &inode->i_data;
+
+ inode->i_sb = sb;
+ inode->i_blkbits = sb->s_blocksize_bits;
+ inode->i_flags = 0;
+ atomic_set(&inode->i_count, 1);
+ inode->i_op = &empty_iops;
+ inode->i_fop = &empty_fops;
+ inode->i_nlink = 1;
+ atomic_set(&inode->i_writecount, 0);
+ inode->i_size = 0;
+ inode->i_blocks = 0;
+ inode->i_bytes = 0;
+ inode->i_generation = 0;
#ifdef CONFIG_QUOTA
- memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
+ memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
- inode->i_pipe = NULL;
- inode->i_bdev = NULL;
- inode->i_cdev = NULL;
- inode->i_rdev = 0;
- inode->dirtied_when = 0;
- if (security_inode_alloc(inode)) {
- if (inode->i_sb->s_op->destroy_inode)
- inode->i_sb->s_op->destroy_inode(inode);
- else
- kmem_cache_free(inode_cachep, (inode));
- return NULL;
- }
+ inode->i_pipe = NULL;
+ inode->i_bdev = NULL;
+ inode->i_cdev = NULL;
+ inode->i_rdev = 0;
+ inode->dirtied_when = 0;
+ if (security_inode_alloc(inode)) {
+ if (inode->i_sb->s_op->destroy_inode)
+ inode->i_sb->s_op->destroy_inode(inode);
+ else
+ kmem_cache_free(inode_cachep, (inode));
+ return NULL;
+ }
- spin_lock_init(&inode->i_lock);
- lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
+ spin_lock_init(&inode->i_lock);
+ lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
- mutex_init(&inode->i_mutex);
- lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
+ mutex_init(&inode->i_mutex);
+ lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
- init_rwsem(&inode->i_alloc_sem);
- lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);
+ init_rwsem(&inode->i_alloc_sem);
+ lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);
- mapping->a_ops = &empty_aops;
- mapping->host = inode;
- mapping->flags = 0;
- mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
- mapping->assoc_mapping = NULL;
- mapping->backing_dev_info = &default_backing_dev_info;
- mapping->writeback_index = 0;
+ mapping->a_ops = &empty_aops;
+ mapping->host = inode;
+ mapping->flags = 0;
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
+ mapping->assoc_mapping = NULL;
+ mapping->backing_dev_info = &default_backing_dev_info;
+ mapping->writeback_index = 0;
- /*
- * If the block_device provides a backing_dev_info for client
- * inodes then use that. Otherwise the inode share the bdev's
- * backing_dev_info.
- */
- if (sb->s_bdev) {
- struct backing_dev_info *bdi;
+ /*
+ * If the block_device provides a backing_dev_info for client
+ * inodes then use that. Otherwise the inode shares the bdev's
+ * backing_dev_info.
+ */
+ if (sb->s_bdev) {
+ struct backing_dev_info *bdi;
- bdi = sb->s_bdev->bd_inode_backing_dev_info;
- if (!bdi)
- bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
- mapping->backing_dev_info = bdi;
- }
- inode->i_private = NULL;
- inode->i_mapping = mapping;
+ bdi = sb->s_bdev->bd_inode_backing_dev_info;
+ if (!bdi)
+ bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
+ mapping->backing_dev_info = bdi;
}
+ inode->i_private = NULL;
+ inode->i_mapping = mapping;
+
return inode;
}
+EXPORT_SYMBOL(inode_init_always);
+
+static struct inode *alloc_inode(struct super_block *sb)
+{
+ struct inode *inode;
+
+ if (sb->s_op->alloc_inode)
+ inode = sb->s_op->alloc_inode(sb);
+ else
+ inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
+
+ if (inode)
+ return inode_init_always(sb, inode);
+ return NULL;
+}
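
Splitting inode_init_always() out of alloc_inode() lets a filesystem that embeds the VFS inode inside a larger, privately cached structure reuse the generic field initialisation instead of open-coding it. A minimal sketch of such a caller follows; struct myfs_inode, myfs_inode_cachep and myfs_alloc_inode() are hypothetical names for illustration, not part of this patch, and the filesystem is assumed to also provide ->destroy_inode.

	#include <linux/fs.h>
	#include <linux/slab.h>

	/* Hypothetical fs-private inode with the VFS inode embedded in it. */
	struct myfs_inode {
		unsigned long	flags;		/* fs-specific state ... */
		struct inode	vfs_inode;	/* embedded VFS inode */
	};

	static struct kmem_cache *myfs_inode_cachep;

	static struct inode *myfs_alloc_inode(struct super_block *sb)
	{
		struct myfs_inode *mi;

		mi = kmem_cache_alloc(myfs_inode_cachep, GFP_KERNEL);
		if (!mi)
			return NULL;

		/*
		 * Reuse the generic initialisation.  On failure
		 * inode_init_always() already disposes of the inode through
		 * ->destroy_inode, so it must not be freed a second time here.
		 */
		if (!inode_init_always(sb, &mi->vfs_inode))
			return NULL;

		return &mi->vfs_inode;
	}
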
void destroy_inode(struct inode *inode)
{
@@ -196,6 +212,7 @@ void destroy_inode(struct inode *inode)
else
kmem_cache_free(inode_cachep, (inode));
}
+EXPORT_SYMBOL(destroy_inode);
/*
@@ -534,6 +551,49 @@ repeat:
return node ? inode : NULL;
}
+static unsigned long hash(struct super_block *sb, unsigned long hashval)
+{
+ unsigned long tmp;
+
+ tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
+ L1_CACHE_BYTES;
+ tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
+ return tmp & I_HASHMASK;
+}
+
+static inline void
+__inode_add_to_lists(struct super_block *sb, struct hlist_head *head,
+ struct inode *inode)
+{
+ inodes_stat.nr_inodes++;
+ list_add(&inode->i_list, &inode_in_use);
+ list_add(&inode->i_sb_list, &sb->s_inodes);
+ if (head)
+ hlist_add_head(&inode->i_hash, head);
+}
+
+/**
+ * inode_add_to_lists - add a new inode to relevant lists
+ * @sb - superblock inode belongs to.
+ * @inode - inode to mark in use
+ *
+ * When an inode is allocated it needs to be accounted for, added to the in use
+ * list, the owning superblock and the inode hash. This needs to be done under
+ * the inode_lock, so export a function to do this rather than the inode lock
+ * itself. We calculate the hash list to add to here so it is all internal,
+ * which requires the caller to have already set up the inode number in the
+ * inode before adding it.
+ */
+void inode_add_to_lists(struct super_block *sb, struct inode *inode)
+{
+ struct hlist_head *head = inode_hashtable + hash(sb, inode->i_ino);
+
+ spin_lock(&inode_lock);
+ __inode_add_to_lists(sb, head, inode);
+ spin_unlock(&inode_lock);
+}
+EXPORT_SYMBOL_GPL(inode_add_to_lists);
+
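
As the comment above says, inode_add_to_lists() exists so that a filesystem which constructs inodes outside the normal iget paths can still have them accounted, attached to the owning superblock and hashed without taking inode_lock itself. A hedged sketch of such a caller, where myfs_finish_inode() and disk_ino are made-up names:

	/* Sketch: attach a filesystem-built inode to the VFS lists and hash. */
	static void myfs_finish_inode(struct super_block *sb, struct inode *inode,
				      unsigned long disk_ino)
	{
		/*
		 * The inode number must be set before the call; the hash
		 * chain is derived from it inside inode_add_to_lists().
		 */
		inode->i_ino = disk_ino;
		inode_add_to_lists(sb, inode);
	}
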
/**
* new_inode - obtain an inode
* @sb: superblock
@@ -561,9 +621,7 @@ struct inode *new_inode(struct super_block *sb)
inode = alloc_inode(sb);
if (inode) {
spin_lock(&inode_lock);
- inodes_stat.nr_inodes++;
- list_add(&inode->i_list, &inode_in_use);
- list_add(&inode->i_sb_list, &sb->s_inodes);
+ __inode_add_to_lists(sb, NULL, inode);
inode->i_ino = ++last_ino;
inode->i_state = 0;
spin_unlock(&inode_lock);
@@ -622,10 +680,7 @@ static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *h
if (set(inode, data))
goto set_failed;
- inodes_stat.nr_inodes++;
- list_add(&inode->i_list, &inode_in_use);
- list_add(&inode->i_sb_list, &sb->s_inodes);
- hlist_add_head(&inode->i_hash, head);
+ __inode_add_to_lists(sb, head, inode);
inode->i_state = I_LOCK|I_NEW;
spin_unlock(&inode_lock);
@@ -671,10 +726,7 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he
old = find_inode_fast(sb, head, ino);
if (!old) {
inode->i_ino = ino;
- inodes_stat.nr_inodes++;
- list_add(&inode->i_list, &inode_in_use);
- list_add(&inode->i_sb_list, &sb->s_inodes);
- hlist_add_head(&inode->i_hash, head);
+ __inode_add_to_lists(sb, head, inode);
inode->i_state = I_LOCK|I_NEW;
spin_unlock(&inode_lock);
@@ -698,16 +750,6 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he
return inode;
}
-static unsigned long hash(struct super_block *sb, unsigned long hashval)
-{
- unsigned long tmp;
-
- tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
- L1_CACHE_BYTES;
- tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
- return tmp & I_HASHMASK;
-}
-
/**
* iunique - get a unique inode number
* @sb: superblock
@@ -990,6 +1032,65 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino)
EXPORT_SYMBOL(iget_locked);
+int insert_inode_locked(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ ino_t ino = inode->i_ino;
+ struct hlist_head *head = inode_hashtable + hash(sb, ino);
+ struct inode *old;
+
+ inode->i_state |= I_LOCK|I_NEW;
+ while (1) {
+ spin_lock(&inode_lock);
+ old = find_inode_fast(sb, head, ino);
+ if (likely(!old)) {
+ hlist_add_head(&inode->i_hash, head);
+ spin_unlock(&inode_lock);
+ return 0;
+ }
+ __iget(old);
+ spin_unlock(&inode_lock);
+ wait_on_inode(old);
+ if (unlikely(!hlist_unhashed(&old->i_hash))) {
+ iput(old);
+ return -EBUSY;
+ }
+ iput(old);
+ }
+}
+
+EXPORT_SYMBOL(insert_inode_locked);
+
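
insert_inode_locked() is aimed at create paths: the caller allocates a new inode, assigns the on-disk inode number itself, and then hashes the inode while detecting any still-live inode with the same number. A rough sketch under those assumptions; myfs_create_inode() and its simplified error handling are illustrative, not taken from this patch.

	#include <linux/err.h>
	#include <linux/fs.h>

	static struct inode *myfs_create_inode(struct super_block *sb,
						unsigned long ino)
	{
		struct inode *inode = new_inode(sb);

		if (!inode)
			return ERR_PTR(-ENOMEM);

		inode->i_ino = ino;
		if (insert_inode_locked(inode) < 0) {
			/*
			 * A live inode already claims this number.  Clear the
			 * I_LOCK|I_NEW state set by insert_inode_locked()
			 * before dropping the reference; a real filesystem
			 * would also release its on-disk resources here.
			 */
			unlock_new_inode(inode);
			iput(inode);
			return ERR_PTR(-EBUSY);
		}

		/* ... fill in the rest of the inode from the caller's data ... */

		unlock_new_inode(inode);
		return inode;
	}
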
+int insert_inode_locked4(struct inode *inode, unsigned long hashval,
+ int (*test)(struct inode *, void *), void *data)
+{
+ struct super_block *sb = inode->i_sb;
+ struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+ struct inode *old;
+
+ inode->i_state |= I_LOCK|I_NEW;
+
+ while (1) {
+ spin_lock(&inode_lock);
+ old = find_inode(sb, head, test, data);
+ if (likely(!old)) {
+ hlist_add_head(&inode->i_hash, head);
+ spin_unlock(&inode_lock);
+ return 0;
+ }
+ __iget(old);
+ spin_unlock(&inode_lock);
+ wait_on_inode(old);
+ if (unlikely(!hlist_unhashed(&old->i_hash))) {
+ iput(old);
+ return -EBUSY;
+ }
+ iput(old);
+ }
+}
+
+EXPORT_SYMBOL(insert_inode_locked4);
+
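
insert_inode_locked4() is the counterpart for filesystems whose inode identity is not captured by i_ino alone; the caller supplies the same hashval/test/data arguments it would pass to iget5_locked(). A hypothetical test callback keyed on a 64-bit on-disk number is sketched below; struct myfs_big_inode, MYFS_BI() and myfs_test() are invented names for illustration.

	#include <linux/fs.h>
	#include <linux/types.h>

	struct myfs_big_inode {
		u64		disk_ino;	/* full 64-bit on-disk inode number */
		struct inode	vfs_inode;
	};

	static inline struct myfs_big_inode *MYFS_BI(struct inode *inode)
	{
		return container_of(inode, struct myfs_big_inode, vfs_inode);
	}

	/* Same comparison callback a filesystem would hand to iget5_locked(). */
	static int myfs_test(struct inode *inode, void *data)
	{
		return MYFS_BI(inode)->disk_ino == *(u64 *)data;
	}

	static int myfs_hash_new_inode(struct inode *inode, u64 disk_ino)
	{
		MYFS_BI(inode)->disk_ino = disk_ino;
		/*
		 * The hash value may be a truncated form of the full key;
		 * the test callback resolves collisions while
		 * insert_inode_locked4() holds inode_lock and waits on any
		 * I_LOCK|I_NEW inode it finds.
		 */
		return insert_inode_locked4(inode, (unsigned long)disk_ino,
					    myfs_test, &disk_ino);
	}
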
/**
* __insert_inode_hash - hash an inode
* @inode: unhashed inode
@@ -1292,6 +1393,7 @@ int inode_wait(void *word)
schedule();
return 0;
}
+EXPORT_SYMBOL(inode_wait);
/*
* If we try to find an inode in the inode hash while it is being