Diffstat (limited to 'fs/ext4')
-rw-r--r--  fs/ext4/balloc.c    112
-rw-r--r--  fs/ext4/dir.c         7
-rw-r--r--  fs/ext4/extents.c    14
-rw-r--r--  fs/ext4/fsync.c       2
-rw-r--r--  fs/ext4/group.h      27
-rw-r--r--  fs/ext4/ialloc.c    151
-rw-r--r--  fs/ext4/inode.c      18
-rw-r--r--  fs/ext4/namei.c      20
-rw-r--r--  fs/ext4/resize.c     59
-rw-r--r--  fs/ext4/super.c     134
-rw-r--r--  fs/ext4/xattr.c       7
11 files changed, 417 insertions, 134 deletions
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index b74bf436844..e906b65448e 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -20,6 +20,7 @@
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
+#include "group.h"
/*
* balloc.c contains the blocks allocation and deallocation routines
*/
@@ -42,6 +43,94 @@ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
}
+/* Initializes an uninitialized block bitmap if given, and returns the
+ * number of blocks free in the group. */
+unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
+ int block_group, struct ext4_group_desc *gdp)
+{
+ unsigned long start;
+ int bit, bit_max;
+ unsigned free_blocks, group_blocks;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ if (bh) {
+ J_ASSERT_BH(bh, buffer_locked(bh));
+
+ /* If checksum is bad mark all blocks used to prevent allocation,
+ * essentially implementing a per-group read-only flag. */
+ if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
+ ext4_error(sb, __FUNCTION__,
+ "Checksum bad for group %u\n", block_group);
+ gdp->bg_free_blocks_count = 0;
+ gdp->bg_free_inodes_count = 0;
+ gdp->bg_itable_unused = 0;
+ memset(bh->b_data, 0xff, sb->s_blocksize);
+ return 0;
+ }
+ memset(bh->b_data, 0, sb->s_blocksize);
+ }
+
+ /* Check for superblock and gdt backups in this group */
+ bit_max = ext4_bg_has_super(sb, block_group);
+
+ if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
+ block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
+ sbi->s_desc_per_block) {
+ if (bit_max) {
+ bit_max += ext4_bg_num_gdb(sb, block_group);
+ bit_max +=
+ le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
+ }
+ } else { /* For META_BG_BLOCK_GROUPS */
+ int group_rel = (block_group -
+ le32_to_cpu(sbi->s_es->s_first_meta_bg)) %
+ EXT4_DESC_PER_BLOCK(sb);
+ if (group_rel == 0 || group_rel == 1 ||
+ (group_rel == EXT4_DESC_PER_BLOCK(sb) - 1))
+ bit_max += 1;
+ }
+
+ if (block_group == sbi->s_groups_count - 1) {
+ /*
+ * Even though mke2fs always initializes the first and last group,
+ * if some other tool enabled EXT4_BG_BLOCK_UNINIT we need
+ * to make sure we calculate the right number of free blocks
+ */
+ group_blocks = ext4_blocks_count(sbi->s_es) -
+ le32_to_cpu(sbi->s_es->s_first_data_block) -
+ (EXT4_BLOCKS_PER_GROUP(sb) * (sbi->s_groups_count -1));
+ } else {
+ group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
+ }
+
+ free_blocks = group_blocks - bit_max;
+
+ if (bh) {
+ for (bit = 0; bit < bit_max; bit++)
+ ext4_set_bit(bit, bh->b_data);
+
+ start = block_group * EXT4_BLOCKS_PER_GROUP(sb) +
+ le32_to_cpu(sbi->s_es->s_first_data_block);
+
+ /* Set bits for block and inode bitmaps, and inode table */
+ ext4_set_bit(ext4_block_bitmap(sb, gdp) - start, bh->b_data);
+ ext4_set_bit(ext4_inode_bitmap(sb, gdp) - start, bh->b_data);
+ for (bit = (ext4_inode_table(sb, gdp) - start),
+ bit_max = bit + sbi->s_itb_per_group; bit < bit_max; bit++)
+ ext4_set_bit(bit, bh->b_data);
+
+ /*
+ * Also, if the number of blocks within the group is
+ * less than blocksize * 8 (which is the size of the
+ * bitmap), set the rest of the block bitmap to 1
+ */
+ mark_bitmap_end(group_blocks, sb->s_blocksize * 8, bh->b_data);
+ }
+
+ return free_blocks - sbi->s_itb_per_group - 2;
+}
+
+
/*
* The free blocks are managed by bitmaps. A file system contains several
* blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap
@@ -119,7 +208,7 @@ block_in_use(ext4_fsblk_t block, struct super_block *sb, unsigned char *map)
*
* Return buffer_head on success or NULL in case of failure.
*/
-static struct buffer_head *
+struct buffer_head *
read_block_bitmap(struct super_block *sb, unsigned int block_group)
{
int i;
@@ -127,11 +216,24 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
struct buffer_head * bh = NULL;
ext4_fsblk_t bitmap_blk;
- desc = ext4_get_group_desc (sb, block_group, NULL);
+ desc = ext4_get_group_desc(sb, block_group, NULL);
if (!desc)
return NULL;
bitmap_blk = ext4_block_bitmap(sb, desc);
- bh = sb_bread(sb, bitmap_blk);
+ if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ bh = sb_getblk(sb, bitmap_blk);
+ if (!buffer_uptodate(bh)) {
+ lock_buffer(bh);
+ if (!buffer_uptodate(bh)) {
+ ext4_init_block_bitmap(sb, bh, block_group,
+ desc);
+ set_buffer_uptodate(bh);
+ }
+ unlock_buffer(bh);
+ }
+ } else {
+ bh = sb_bread(sb, bitmap_blk);
+ }
if (!bh)
ext4_error (sb, __FUNCTION__,
"Cannot read block bitmap - "
@@ -627,6 +729,7 @@ do_more:
desc->bg_free_blocks_count =
cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
group_freed);
+ desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
spin_unlock(sb_bgl_lock(sbi, block_group));
percpu_counter_add(&sbi->s_freeblocks_counter, count);
@@ -1685,8 +1788,11 @@ allocated:
ret_block, goal_hits, goal_attempts);
spin_lock(sb_bgl_lock(sbi, group_no));
+ if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
+ gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
gdp->bg_free_blocks_count =
cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
+ gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
spin_unlock(sb_bgl_lock(sbi, group_no));
percpu_counter_sub(&sbi->s_freeblocks_counter, num);
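
The new ext4_init_block_bitmap() above ends up returning free_blocks -
s_itb_per_group - 2: the superblock backup, GDT and reserved-GDT blocks
counted in bit_max, the inode table blocks, and the two bitmap blocks are all
taken out of the group size. A stand-alone sketch of that accounting, using
hypothetical layout values rather than numbers read from any real superblock:

#include <stdio.h>

int main(void)
{
        unsigned blocks_per_group = 32768;  /* blocksize * 8 for 4 KiB blocks */
        unsigned has_super        = 1;      /* this group holds an sb backup  */
        unsigned gdt_blocks       = 13;     /* group descriptor table blocks  */
        unsigned reserved_gdt     = 256;    /* s_reserved_gdt_blocks          */
        unsigned itb_per_group    = 512;    /* inode table blocks per group   */

        /* sb backup + GDT + reserved GDT: the bit_max computed above */
        unsigned bit_max = has_super ? 1 + gdt_blocks + reserved_gdt : 0;

        /* the "- 2" accounts for the block bitmap and the inode bitmap */
        unsigned free_blocks = blocks_per_group - bit_max - itb_per_group - 2;

        printf("free blocks after init: %u\n", free_blocks);  /* 31984 */
        return 0;
}

Passing a NULL buffer head, as the ext4_free_blocks_after_init() macro in
group.h does, runs exactly this arithmetic without touching any bitmap.
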
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 0fb1e62b20d..f612bef9831 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -47,9 +47,7 @@ const struct file_operations ext4_dir_operations = {
.compat_ioctl = ext4_compat_ioctl,
#endif
.fsync = ext4_sync_file, /* BKL held */
-#ifdef CONFIG_EXT4_INDEX
.release = ext4_release_dir,
-#endif
};
@@ -107,7 +105,6 @@ static int ext4_readdir(struct file * filp,
sb = inode->i_sb;
-#ifdef CONFIG_EXT4_INDEX
if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_COMPAT_DIR_INDEX) &&
((EXT4_I(inode)->i_flags & EXT4_INDEX_FL) ||
@@ -123,7 +120,6 @@ static int ext4_readdir(struct file * filp,
*/
EXT4_I(filp->f_path.dentry->d_inode)->i_flags &= ~EXT4_INDEX_FL;
}
-#endif
stored = 0;
offset = filp->f_pos & (sb->s_blocksize - 1);
@@ -232,7 +228,6 @@ out:
return ret;
}
-#ifdef CONFIG_EXT4_INDEX
/*
* These functions convert from the major/minor hash to an f_pos
* value.
@@ -518,5 +513,3 @@ static int ext4_release_dir (struct inode * inode, struct file * filp)
return 0;
}
-
-#endif
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 78beb096f57..85287742f2a 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -33,7 +33,7 @@
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext4_jbd2.h>
-#include <linux/jbd.h>
+#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
@@ -52,7 +52,7 @@ static ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
ext4_fsblk_t block;
- block = le32_to_cpu(ex->ee_start);
+ block = le32_to_cpu(ex->ee_start_lo);
block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
return block;
}
@@ -65,7 +65,7 @@ static ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
ext4_fsblk_t block;
- block = le32_to_cpu(ix->ei_leaf);
+ block = le32_to_cpu(ix->ei_leaf_lo);
block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
return block;
}
@@ -77,7 +77,7 @@ static ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
*/
static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
- ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff));
+ ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
@@ -88,7 +88,7 @@ static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
*/
static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
- ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff));
+ ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
@@ -1409,8 +1409,7 @@ has_space:
eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1);
nearex = path[depth].p_ext;
nearex->ee_block = newext->ee_block;
- nearex->ee_start = newext->ee_start;
- nearex->ee_start_hi = newext->ee_start_hi;
+ ext4_ext_store_pblock(nearex, ext_pblock(newext));
nearex->ee_len = newext->ee_len;
merge:
@@ -2177,7 +2176,6 @@ int ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode,
}
/* ex2: iblock to iblock + maxblocks-1 : initialised */
ex2->ee_block = cpu_to_le32(iblock);
- ex2->ee_start = cpu_to_le32(newblock);
ext4_ext_store_pblock(ex2, newblock);
ex2->ee_len = cpu_to_le16(allocated);
if (ex2 != ex)
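
The ee_start/ei_leaf renames in the hunks above reflect that a 48-bit physical
block number is stored as a 32-bit low field plus a 16-bit high field. A
stand-alone sketch of the pack/unpack arithmetic; the struct and helper names
are illustrative, the on-disk fields are little-endian (glossed over here),
and the double shift mirrors the kernel idiom, presumably so the expression
stays well-defined even when the block type is only 32 bits wide:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative 48-bit extent start, stored as 32-bit low + 16-bit high. */
struct extent_start {
        uint32_t lo;
        uint16_t hi;
};

static uint64_t extent_pblock(const struct extent_start *e)
{
        uint64_t block = e->lo;

        /* "<< 31 << 1" instead of "<< 32", as in ext_pblock() above */
        block |= ((uint64_t)e->hi << 31) << 1;
        return block;
}

static void extent_store_pblock(struct extent_start *e, uint64_t pb)
{
        e->lo = (uint32_t)(pb & 0xffffffff);
        e->hi = (uint16_t)((pb >> 31) >> 1);
}

int main(void)
{
        struct extent_start e;
        uint64_t pb = 0x123456789abULL;     /* any block number below 2^48 */

        extent_store_pblock(&e, pb);
        assert(extent_pblock(&e) == pb);
        printf("lo=0x%08x hi=0x%04x -> 0x%llx\n", e.lo, e.hi,
               (unsigned long long)extent_pblock(&e));
        return 0;
}
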
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 2a167d7131f..8d50879d1c2 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -47,7 +47,7 @@ int ext4_sync_file(struct file * file, struct dentry *dentry, int datasync)
struct inode *inode = dentry->d_inode;
int ret = 0;
- J_ASSERT(ext4_journal_current_handle() == 0);
+ J_ASSERT(ext4_journal_current_handle() == NULL);
/*
* data=writeback:
diff --git a/fs/ext4/group.h b/fs/ext4/group.h
new file mode 100644
index 00000000000..1577910bb58
--- /dev/null
+++ b/fs/ext4/group.h
@@ -0,0 +1,27 @@
+/*
+ * linux/fs/ext4/group.h
+ *
+ * Copyright (C) 2007 Cluster File Systems, Inc
+ *
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ */
+
+#ifndef _LINUX_EXT4_GROUP_H
+#define _LINUX_EXT4_GROUP_H
+
+extern __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 group,
+ struct ext4_group_desc *gdp);
+extern int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 group,
+ struct ext4_group_desc *gdp);
+struct buffer_head *read_block_bitmap(struct super_block *sb,
+ unsigned int block_group);
+extern unsigned ext4_init_block_bitmap(struct super_block *sb,
+ struct buffer_head *bh, int group,
+ struct ext4_group_desc *desc);
+#define ext4_free_blocks_after_init(sb, group, desc) \
+ ext4_init_block_bitmap(sb, NULL, group, desc)
+extern unsigned ext4_init_inode_bitmap(struct super_block *sb,
+ struct buffer_head *bh, int group,
+ struct ext4_group_desc *desc);
+extern void mark_bitmap_end(int start_bit, int end_bit, char *bitmap);
+#endif /* _LINUX_EXT4_GROUP_H */
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index d0c7793d939..c61f37fd3f0 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -28,6 +28,7 @@
#include "xattr.h"
#include "acl.h"
+#include "group.h"
/*
* ialloc.c contains the inodes allocation and deallocation routines
@@ -43,6 +44,52 @@
* the free blocks count in the block.
*/
+/*
+ * To avoid calling the atomic setbit hundreds or thousands of times, we only
+ * need to use it within a single byte (to ensure we get endianness right).
+ * We can use memset for the rest of the bitmap as there are no other users.
+ */
+void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
+{
+ int i;
+
+ if (start_bit >= end_bit)
+ return;
+
+ ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
+ for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
+ ext4_set_bit(i, bitmap);
+ if (i < end_bit)
+ memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
+}
+
+/* Initializes an uninitialized inode bitmap */
+unsigned ext4_init_inode_bitmap(struct super_block *sb,
+ struct buffer_head *bh, int block_group,
+ struct ext4_group_desc *gdp)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ J_ASSERT_BH(bh, buffer_locked(bh));
+
+ /* If checksum is bad mark all blocks and inodes used to prevent
+ * allocation, essentially implementing a per-group read-only flag. */
+ if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
+ ext4_error(sb, __FUNCTION__, "Checksum bad for group %u\n",
+ block_group);
+ gdp->bg_free_blocks_count = 0;
+ gdp->bg_free_inodes_count = 0;
+ gdp->bg_itable_unused = 0;
+ memset(bh->b_data, 0xff, sb->s_blocksize);
+ return 0;
+ }
+
+ memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
+ mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
+ bh->b_data);
+
+ return EXT4_INODES_PER_GROUP(sb);
+}
/*
* Read the inode allocation bitmap for a given block_group, reading
@@ -59,8 +106,20 @@ read_inode_bitmap(struct super_block * sb, unsigned long block_group)
desc = ext4_get_group_desc(sb, block_group, NULL);
if (!desc)
goto error_out;
-
- bh = sb_bread(sb, ext4_inode_bitmap(sb, desc));
+ if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+ bh = sb_getblk(sb, ext4_inode_bitmap(sb, desc));
+ if (!buffer_uptodate(bh)) {
+ lock_buffer(bh);
+ if (!buffer_uptodate(bh)) {
+ ext4_init_inode_bitmap(sb, bh, block_group,
+ desc);
+ set_buffer_uptodate(bh);
+ }
+ unlock_buffer(bh);
+ }
+ } else {
+ bh = sb_bread(sb, ext4_inode_bitmap(sb, desc));
+ }
if (!bh)
ext4_error(sb, "read_inode_bitmap",
"Cannot read inode bitmap - "
@@ -169,6 +228,8 @@ void ext4_free_inode (handle_t *handle, struct inode * inode)
if (is_directory)
gdp->bg_used_dirs_count = cpu_to_le16(
le16_to_cpu(gdp->bg_used_dirs_count) - 1);
+ gdp->bg_checksum = ext4_group_desc_csum(sbi,
+ block_group, gdp);
spin_unlock(sb_bgl_lock(sbi, block_group));
percpu_counter_inc(&sbi->s_freeinodes_counter);
if (is_directory)
@@ -435,7 +496,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
struct ext4_sb_info *sbi;
int err = 0;
struct inode *ret;
- int i;
+ int i, free = 0;
/* Cannot create files in a deleted directory */
if (!dir || !dir->i_nlink)
@@ -517,11 +578,13 @@ repeat_in_this_group:
goto out;
got:
- ino += group * EXT4_INODES_PER_GROUP(sb) + 1;
- if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
- ext4_error (sb, "ext4_new_inode",
- "reserved inode or inode > inodes count - "
- "block_group = %d, inode=%lu", group, ino);
+ ino++;
+ if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
+ ino > EXT4_INODES_PER_GROUP(sb)) {
+ ext4_error(sb, __FUNCTION__,
+ "reserved inode or inode > inodes count - "
+ "block_group = %d, inode=%lu", group,
+ ino + group * EXT4_INODES_PER_GROUP(sb));
err = -EIO;
goto fail;
}
@@ -529,13 +592,78 @@ got:
BUFFER_TRACE(bh2, "get_write_access");
err = ext4_journal_get_write_access(handle, bh2);
if (err) goto fail;
+
+ /* We may have to initialize the block bitmap if it isn't already */
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
+ gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ struct buffer_head *block_bh = read_block_bitmap(sb, group);
+
+ BUFFER_TRACE(block_bh, "get block bitmap access");
+ err = ext4_journal_get_write_access(handle, block_bh);
+ if (err) {
+ brelse(block_bh);
+ goto fail;
+ }
+
+ free = 0;
+ spin_lock(sb_bgl_lock(sbi, group));
+ /* recheck and clear flag under lock if we still need to */
+ if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
+ free = ext4_free_blocks_after_init(sb, group, gdp);
+ gdp->bg_free_blocks_count = cpu_to_le16(free);
+ }
+ spin_unlock(sb_bgl_lock(sbi, group));
+
+ /* Don't need to dirty bitmap block if we didn't change it */
+ if (free) {
+ BUFFER_TRACE(block_bh, "dirty block bitmap");
+ err = ext4_journal_dirty_metadata(handle, block_bh);
+ }
+
+ brelse(block_bh);
+ if (err)
+ goto fail;
+ }
+
spin_lock(sb_bgl_lock(sbi, group));
+ /* If we didn't allocate from within the initialized part of the inode
+ * table then we need to initialize up to this inode. */
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
+ if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+ gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
+
+ /* When marking the block group with
+ * ~EXT4_BG_INODE_UNINIT we don't want to depend
+ * on the value of bg_itable_unused even though
+ * mke2fs could have initialized the same for us.
+ * Instead we calculate the value below.
+ */
+
+ free = 0;
+ } else {
+ free = EXT4_INODES_PER_GROUP(sb) -
+ le16_to_cpu(gdp->bg_itable_unused);
+ }
+
+ /*
+ * Check the relative inode number against the last used
+ * relative inode number in this group. If it is greater,
+ * we need to update the bg_itable_unused count.
+ */
+ if (ino > free)
+ gdp->bg_itable_unused =
+ cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
+ }
+
gdp->bg_free_inodes_count =
cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
if (S_ISDIR(mode)) {
gdp->bg_used_dirs_count =
cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
}
+ gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
spin_unlock(sb_bgl_lock(sbi, group));
BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
err = ext4_journal_dirty_metadata(handle, bh2);
@@ -557,7 +685,7 @@ got:
inode->i_gid = current->fsgid;
inode->i_mode = mode;
- inode->i_ino = ino;
+ inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
/* This is the optimal IO size (for stat), not the fs block size */
inode->i_blocks = 0;
inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
@@ -573,11 +701,6 @@ got:
/* dirsync only applies to directories */
if (!S_ISDIR(mode))
ei->i_flags &= ~EXT4_DIRSYNC_FL;
-#ifdef EXT4_FRAGMENTS
- ei->i_faddr = 0;
- ei->i_frag_no = 0;
- ei->i_frag_size = 0;
-#endif
ei->i_file_acl = 0;
ei->i_dir_acl = 0;
ei->i_dtime = 0;
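
mark_bitmap_end(), added in the ialloc.c hunk above, only sets individual bits
up to the next byte boundary and then memset()s whole bytes, exactly as its
comment explains. A user-space model of the same trick; set_bit() below is a
plain helper standing in for the kernel's atomic ext4_set_bit():

#include <stdio.h>
#include <string.h>

static void set_bit(int nr, unsigned char *bitmap)
{
        bitmap[nr >> 3] |= 1 << (nr & 7);   /* little-endian bit numbering */
}

static void mark_bitmap_end(int start_bit, int end_bit, unsigned char *bitmap)
{
        int i;

        if (start_bit >= end_bit)
                return;

        /* per-bit only up to the next byte boundary... */
        for (i = start_bit; i < ((start_bit + 7) & ~7); i++)
                set_bit(i, bitmap);
        /* ...then fill the remaining whole bytes in one go */
        if (i < end_bit)
                memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

int main(void)
{
        unsigned char bitmap[8] = { 0 };    /* e.g. a 64-inode bitmap */
        int i;

        mark_bitmap_end(20, 64, bitmap);    /* inodes 20..63 do not exist */
        for (i = 0; i < 8; i++)
                printf("%02x ", bitmap[i]); /* 00 00 f0 ff ff ff ff ff */
        printf("\n");
        return 0;
}
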
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 0df2b1e06d0..5489703d957 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1027,7 +1027,7 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
}
if (buffer_new(&dummy)) {
J_ASSERT(create != 0);
- J_ASSERT(handle != 0);
+ J_ASSERT(handle != NULL);
/*
* Now that we do not always journal data, we should
@@ -2711,11 +2711,6 @@ void ext4_read_inode(struct inode * inode)
}
inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
ei->i_flags = le32_to_cpu(raw_inode->i_flags);
-#ifdef EXT4_FRAGMENTS
- ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
- ei->i_frag_no = raw_inode->i_frag;
- ei->i_frag_size = raw_inode->i_fsize;
-#endif
ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
cpu_to_le32(EXT4_OS_HURD))
@@ -2860,11 +2855,6 @@ static int ext4_do_update_inode(handle_t *handle,
raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
raw_inode->i_flags = cpu_to_le32(ei->i_flags);
-#ifdef EXT4_FRAGMENTS
- raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
- raw_inode->i_frag = ei->i_frag_no;
- raw_inode->i_fsize = ei->i_frag_size;
-#endif
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
cpu_to_le32(EXT4_OS_HURD))
raw_inode->i_file_acl_high =
@@ -3243,12 +3233,14 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
iloc, handle);
if (ret) {
EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
- if (mnt_count != sbi->s_es->s_mnt_count) {
+ if (mnt_count !=
+ le16_to_cpu(sbi->s_es->s_mnt_count)) {
ext4_warning(inode->i_sb, __FUNCTION__,
"Unable to expand inode %lu. Delete"
" some EAs or run e2fsck.",
inode->i_ino);
- mnt_count = sbi->s_es->s_mnt_count;
+ mnt_count =
+ le16_to_cpu(sbi->s_es->s_mnt_count);
}
}
}
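
The mnt_count change above compares a local counter against the on-disk
s_mnt_count, which is stored little-endian, hence the added le16_to_cpu(). A
minimal stand-alone illustration of that conversion; read_le16() is a portable
stand-in with a pointer-based signature, not the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* Interpret two raw bytes as a little-endian value on any host. */
static uint16_t read_le16(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
        uint8_t raw_s_mnt_count[2] = { 0x2c, 0x01 };  /* on-disk __le16 for 300 */
        uint16_t mnt_count = 300;                     /* CPU-order local copy   */

        /* comparing the raw bytes directly only works on little-endian hosts;
         * converting first is correct everywhere */
        if (mnt_count == read_le16(raw_s_mnt_count))
                printf("mount counts match\n");
        return 0;
}
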
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 5fdb862e71c..94ee6f315dc 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -144,7 +144,6 @@ struct dx_map_entry
u16 size;
};
-#ifdef CONFIG_EXT4_INDEX
static inline unsigned dx_get_block (struct dx_entry *entry);
static void dx_set_block (struct dx_entry *entry, unsigned value);
static inline unsigned dx_get_hash (struct dx_entry *entry);
@@ -766,8 +765,6 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, u32 block)
dx_set_block(new, block);
dx_set_count(entries, count + 1);
}
-#endif
-
static void ext4_update_dx_flag(struct inode *inode)
{
@@ -869,7 +866,6 @@ static struct buffer_head * ext4_find_entry (struct dentry *dentry,
name = dentry->d_name.name;
if (namelen > EXT4_NAME_LEN)
return NULL;
-#ifdef CONFIG_EXT4_INDEX
if (is_dx(dir)) {
bh = ext4_dx_find_entry(dentry, res_dir, &err);
/*
@@ -881,7 +877,6 @@ static struct buffer_head * ext4_find_entry (struct dentry *dentry,
return bh;
dxtrace(printk("ext4_find_entry: dx failed, falling back\n"));
}
-#endif
nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
start = EXT4_I(dir)->i_dir_start_lookup;
if (start >= nblocks)
@@ -957,7 +952,6 @@ cleanup_and_exit:
return ret;
}
-#ifdef CONFIG_EXT4_INDEX
static struct buffer_head * ext4_dx_find_entry(struct dentry *dentry,
struct ext4_dir_entry_2 **res_dir, int *err)
{
@@ -1025,7 +1019,6 @@ errout:
dx_release (frames);
return NULL;
}
-#endif
static struct dentry *ext4_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
{
@@ -1121,7 +1114,6 @@ static inline void ext4_set_de_type(struct super_block *sb,
de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
}
-#ifdef CONFIG_EXT4_INDEX
/*
* Move count entries from end of map between two memory locations.
* Returns pointer to last entry moved.
@@ -1266,8 +1258,6 @@ errout:
*error = err;
return NULL;
}
-#endif
-
/*
* Add a new entry into a directory (leaf) block. If de is non-NULL,
@@ -1364,7 +1354,6 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
return 0;
}
-#ifdef CONFIG_EXT4_INDEX
/*
* This converts a one block unindexed directory to a 3 block indexed
* directory, and adds the dentry to the indexed directory.
@@ -1443,7 +1432,6 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
return add_dirent_to_buf(handle, dentry, inode, de, bh);
}
-#endif
/*
* ext4_add_entry()
@@ -1464,9 +1452,7 @@ static int ext4_add_entry (handle_t *handle, struct dentry *dentry,
struct ext4_dir_entry_2 *de;
struct super_block * sb;
int retval;
-#ifdef CONFIG_EXT4_INDEX
int dx_fallback=0;
-#endif
unsigned blocksize;
u32 block, blocks;
@@ -1474,7 +1460,6 @@ static int ext4_add_entry (handle_t *handle, struct dentry *dentry,
blocksize = sb->s_blocksize;
if (!dentry->d_name.len)
return -EINVAL;
-#ifdef CONFIG_EXT4_INDEX
if (is_dx(dir)) {
retval = ext4_dx_add_entry(handle, dentry, inode);
if (!retval || (retval != ERR_BAD_DX_DIR))
@@ -1483,7 +1468,6 @@ static int ext4_add_entry (handle_t *handle, struct dentry *dentry,
dx_fallback++;
ext4_mark_inode_dirty(handle, dir);
}
-#endif
blocks = dir->i_size >> sb->s_blocksize_bits;
for (block = 0, offset = 0; block < blocks; block++) {
bh = ext4_bread(handle, dir, block, 0, &retval);
@@ -1493,11 +1477,9 @@ static int ext4_add_entry (handle_t *handle, struct dentry *dentry,
if (retval != -ENOSPC)
return retval;
-#ifdef CONFIG_EXT4_INDEX
if (blocks == 1 && !dx_fallback &&
EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
return make_indexed_dir(handle, dentry, inode, bh);
-#endif
brelse(bh);
}
bh = ext4_append(handle, dir, &block, &retval);
@@ -1509,7 +1491,6 @@ static int ext4_add_entry (handle_t *handle, struct dentry *dentry,
return add_dirent_to_buf(handle, dentry, inode, de, bh);
}
-#ifdef CONFIG_EXT4_INDEX
/*
* Returns 0 for success, or a negative error value
*/
@@ -1644,7 +1625,6 @@ cleanup:
dx_release(frames);
return err;
}
-#endif
/*
* ext4_delete_entry deletes a directory entry by merging it with the
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 472fc0d3e1c..bd8a52bb399 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
+#include "group.h"
#define outside(b, first, last) ((b) < (first) || (b) >= (last))
#define inside(b, first, last) ((b) >= (first) && (b) < (last))
@@ -140,22 +141,29 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
}
/*
- * To avoid calling the atomic setbit hundreds or thousands of times, we only
- * need to use it within a single byte (to ensure we get endianness right).
- * We can use memset for the rest of the bitmap as there are no other users.
+ * If we have fewer than thresh credits, extend by EXT4_MAX_TRANS_DATA.
+ * If that fails, restart the transaction & regain write access for the
+ * buffer head which is used for block_bitmap modifications.
*/
-static void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
+static int extend_or_restart_transaction(handle_t *handle, int thresh,
+ struct buffer_head *bh)
{
- int i;
+ int err;
+
+ if (handle->h_buffer_credits >= thresh)
+ return 0;
- if (start_bit >= end_bit)
- return;
+ err = ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA);
+ if (err < 0)
+ return err;
+ if (err) {
+ if ((err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
+ return err;
+ if ((err = ext4_journal_get_write_access(handle, bh)))
+ return err;
+ }
- ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
- for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
- ext4_set_bit(i, bitmap);
- if (i < end_bit)
- memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
+ return 0;
}
/*
@@ -180,8 +188,9 @@ static int setup_new_group_blocks(struct super_block *sb,
int i;
int err = 0, err2;
- handle = ext4_journal_start_sb(sb, reserved_gdb + gdblocks +
- 2 + sbi->s_itb_per_group);
+ /* This transaction may be extended/restarted along the way */
+ handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA);
+
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -208,6 +217,9 @@ static int setup_new_group_blocks(struct super_block *sb,
ext4_debug("update backup group %#04lx (+%d)\n", block, bit);
+ if ((err = extend_or_restart_transaction(handle, 1, bh)))
+ goto exit_bh;
+
gdb = sb_getblk(sb, block);
if (!gdb) {
err = -EIO;
@@ -217,10 +229,10 @@ static int setup_new_group_blocks(struct super_block *sb,
brelse(gdb);
goto exit_bh;
}
- lock_buffer(bh);
- memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, bh->b_size);
+ lock_buffer(gdb);
+ memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
set_buffer_uptodate(gdb);
- unlock_buffer(bh);
+ unlock_buffer(gdb);
ext4_journal_dirty_metadata(handle, gdb);
ext4_set_bit(bit, bh->b_data);
brelse(gdb);
@@ -233,6 +245,9 @@ static int setup_new_group_blocks(struct super_block *sb,
ext4_debug("clear reserved block %#04lx (+%d)\n", block, bit);
+ if ((err = extend_or_restart_transaction(handle, 1, bh)))
+ goto exit_bh;
+
if (IS_ERR(gdb = bclean(handle, sb, block))) {
err = PTR_ERR(bh);
goto exit_bh;
@@ -254,6 +269,10 @@ static int setup_new_group_blocks(struct super_block *sb,
struct buffer_head *it;
ext4_debug("clear inode block %#04lx (+%d)\n", block, bit);
+
+ if ((err = extend_or_restart_transaction(handle, 1, bh)))
+ goto exit_bh;
+
if (IS_ERR(it = bclean(handle, sb, block))) {
err = PTR_ERR(it);
goto exit_bh;
@@ -262,6 +281,10 @@ static int setup_new_group_blocks(struct super_block *sb,
brelse(it);
ext4_set_bit(bit, bh->b_data);
}
+
+ if ((err = extend_or_restart_transaction(handle, 2, bh)))
+ goto exit_bh;
+
mark_bitmap_end(input->blocks_count, EXT4_BLOCKS_PER_GROUP(sb),
bh->b_data);
ext4_journal_dirty_metadata(handle, bh);
@@ -289,7 +312,6 @@ exit_journal:
return err;
}
-
/*
* Iterate through the groups which hold BACKUP superblock/GDT copies in an
* ext4 filesystem. The counters should be initialized to 1, 5, and 7 before
@@ -842,6 +864,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */
gdp->bg_free_blocks_count = cpu_to_le16(input->free_blocks_count);
gdp->bg_free_inodes_count = cpu_to_le16(EXT4_INODES_PER_GROUP(sb));
+ gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp);
/*
* Make the new blocks and inodes valid next. We do this before
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 4c8d31c6145..8031dc0e24e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -37,12 +37,14 @@
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/log2.h>
+#include <linux/crc16.h>
#include <asm/uaccess.h>
#include "xattr.h"
#include "acl.h"
#include "namei.h"
+#include "group.h"
static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
unsigned long journal_devnum);
@@ -68,31 +70,31 @@ static void ext4_write_super_lockfs(struct super_block *sb);
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
struct ext4_group_desc *bg)
{
- return le32_to_cpu(bg->bg_block_bitmap) |
+ return le32_to_cpu(bg->bg_block_bitmap_lo) |
(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
- (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
+ (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}
ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
struct ext4_group_desc *bg)
{
- return le32_to_cpu(bg->bg_inode_bitmap) |
+ return le32_to_cpu(bg->bg_inode_bitmap_lo) |
(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
- (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
+ (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}
ext4_fsblk_t ext4_inode_table(struct super_block *sb,
struct ext4_group_desc *bg)
{
- return le32_to_cpu(bg->bg_inode_table) |
+ return le32_to_cpu(bg->bg_inode_table_lo) |
(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
- (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
+ (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}
void ext4_block_bitmap_set(struct super_block *sb,
struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
- bg->bg_block_bitmap = cpu_to_le32((u32)blk);
+ bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}
@@ -100,7 +102,7 @@ void ext4_block_bitmap_set(struct super_block *sb,
void ext4_inode_bitmap_set(struct super_block *sb,
struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
- bg->bg_inode_bitmap = cpu_to_le32((u32)blk);
+ bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}
@@ -108,7 +110,7 @@ void ext4_inode_bitmap_set(struct super_block *sb,
void ext4_inode_table_set(struct super_block *sb,
struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
- bg->bg_inode_table = cpu_to_le32((u32)blk);
+ bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}
@@ -684,13 +686,10 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
}
-static struct dentry *ext4_get_dentry(struct super_block *sb, void *vobjp)
+static struct inode *ext4_nfs_get_inode(struct super_block *sb,
+ u64 ino, u32 generation)
{
- __u32 *objp = vobjp;
- unsigned long ino = objp[0];
- __u32 generation = objp[1];
struct inode *inode;
- struct dentry *result;
if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
return ERR_PTR(-ESTALE);
@@ -713,15 +712,22 @@ static struct dentry *ext4_get_dentry(struct super_block *sb, void *vobjp)
iput(inode);
return ERR_PTR(-ESTALE);
}
- /* now to find a dentry.
- * If possible, get a well-connected one
- */
- result = d_alloc_anon(inode);
- if (!result) {
- iput(inode);
- return ERR_PTR(-ENOMEM);
- }
- return result;
+
+ return inode;
+}
+
+static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+{
+ return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+ ext4_nfs_get_inode);
+}
+
+static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+{
+ return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+ ext4_nfs_get_inode);
}
#ifdef CONFIG_QUOTA
@@ -790,9 +796,10 @@ static const struct super_operations ext4_sops = {
#endif
};
-static struct export_operations ext4_export_ops = {
+static const struct export_operations ext4_export_ops = {
+ .fh_to_dentry = ext4_fh_to_dentry,
+ .fh_to_parent = ext4_fh_to_parent,
.get_parent = ext4_get_parent,
- .get_dentry = ext4_get_dentry,
};
enum {
@@ -1037,7 +1044,7 @@ static int parse_options (char *options, struct super_block *sb,
if (option < 0)
return 0;
if (option == 0)
- option = JBD_DEFAULT_MAX_COMMIT_AGE;
+ option = JBD2_DEFAULT_MAX_COMMIT_AGE;
sbi->s_commit_interval = HZ * option;
break;
case Opt_data_journal:
@@ -1308,6 +1315,43 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
return res;
}
+__le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
+ struct ext4_group_desc *gdp)
+{
+ __u16 crc = 0;
+
+ if (sbi->s_es->s_feature_ro_compat &
+ cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
+ int offset = offsetof(struct ext4_group_desc, bg_checksum);
+ __le32 le_group = cpu_to_le32(block_group);
+
+ crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
+ crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
+ crc = crc16(crc, (__u8 *)gdp, offset);
+ offset += sizeof(gdp->bg_checksum); /* skip checksum */
+ /* for checksum of struct ext4_group_desc do the rest...*/
+ if ((sbi->s_es->s_feature_incompat &
+ cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) &&
+ offset < le16_to_cpu(sbi->s_es->s_desc_size))
+ crc = crc16(crc, (__u8 *)gdp + offset,
+ le16_to_cpu(sbi->s_es->s_desc_size) -
+ offset);
+ }
+
+ return cpu_to_le16(crc);
+}
+
+int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group,
+ struct ext4_group_desc *gdp)
+{
+ if ((sbi->s_es->s_feature_ro_compat &
+ cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) &&
+ (gdp->bg_checksum != ext4_group_desc_csum(sbi, block_group, gdp)))
+ return 0;
+
+ return 1;
+}
+
/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors (struct super_block * sb)
{
@@ -1319,13 +1363,17 @@ static int ext4_check_descriptors (struct super_block * sb)
ext4_fsblk_t inode_table;
struct ext4_group_desc * gdp = NULL;
int desc_block = 0;
+ int flexbg_flag = 0;
int i;
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
+ flexbg_flag = 1;
+
ext4_debug ("Checking group descriptors");
for (i = 0; i < sbi->s_groups_count; i++)
{
- if (i == sbi->s_groups_count - 1)
+ if (i == sbi->s_groups_count - 1 || flexbg_flag)
last_block = ext4_blocks_count(sbi->s_es) - 1;
else
last_block = first_block +
@@ -1362,7 +1410,16 @@ static int ext4_check_descriptors (struct super_block * sb)
i, inode_table);
return 0;
}
- first_block += EXT4_BLOCKS_PER_GROUP(sb);
+ if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
+ ext4_error(sb, __FUNCTION__,
+ "Checksum for group %d failed (%u!=%u)\n", i,
+ le16_to_cpu(ext4_group_desc_csum(sbi, i,
+ gdp)),
+ le16_to_cpu(gdp->bg_checksum));
+ return 0;
+ }
+ if (!flexbg_flag)
+ first_block += EXT4_BLOCKS_PER_GROUP(sb);
gdp = (struct ext4_group_desc *)
((__u8 *)gdp + EXT4_DESC_SIZE(sb));
}
@@ -1726,14 +1783,6 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
}
- sbi->s_frag_size = EXT4_MIN_FRAG_SIZE <<
- le32_to_cpu(es->s_log_frag_size);
- if (blocksize != sbi->s_frag_size) {
- printk(KERN_ERR
- "EXT4-fs: fragsize %lu != blocksize %u (unsupported)\n",
- sbi->s_frag_size, blocksize);
- goto failed_mount;
- }
sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) {
if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
@@ -1747,7 +1796,6 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
} else
sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
- sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
if (EXT4_INODE_SIZE(sb) == 0)
goto cantfind_ext4;
@@ -1771,12 +1819,6 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
sbi->s_blocks_per_group);
goto failed_mount;
}
- if (sbi->s_frags_per_group > blocksize * 8) {
- printk (KERN_ERR
- "EXT4-fs: #fragments per group too big: %lu\n",
- sbi->s_frags_per_group);
- goto failed_mount;
- }
if (sbi->s_inodes_per_group > blocksize * 8) {
printk (KERN_ERR
"EXT4-fs: #inodes per group too big: %lu\n",
@@ -2630,7 +2672,7 @@ static int ext4_statfs (struct dentry * dentry, struct kstatfs * buf)
if (test_opt(sb, MINIX_DF)) {
sbi->s_overhead_last = 0;
- } else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
+ } else if (sbi->s_blocks_last != ext4_blocks_count(es)) {
unsigned long ngroups = sbi->s_groups_count, i;
ext4_fsblk_t overhead = 0;
smp_rmb();
@@ -2665,14 +2707,14 @@ static int ext4_statfs (struct dentry * dentry, struct kstatfs * buf)
overhead += ngroups * (2 + sbi->s_itb_per_group);
sbi->s_overhead_last = overhead;
smp_wmb();
- sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count);
+ sbi->s_blocks_last = ext4_blocks_count(es);
}
buf->f_type = EXT4_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = ext4_blocks_count(es) - sbi->s_overhead_last;
buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
- es->s_free_blocks_count = cpu_to_le32(buf->f_bfree);
+ ext4_free_blocks_count_set(es, buf->f_bfree);
buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es);
if (buf->f_bfree < ext4_r_blocks_count(es))
buf->f_bavail = 0;
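
ext4_group_desc_csum() above seeds a CRC-16 with the filesystem UUID, folds in
the little-endian group number, and then covers the descriptor up to
bg_checksum (and past it for 64-bit descriptors). A user-space sketch of the
same scheme; the toy_group_desc layout, the placeholder UUID and the
bit-at-a-time crc16() here are illustrative stand-ins for the real on-disk
structure and lib/crc16:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bit-at-a-time CRC-16 over a buffer (reflected polynomial 0xA001). */
static uint16_t crc16(uint16_t crc, const void *buf, size_t len)
{
        const uint8_t *p = buf;
        int i;

        while (len--) {
                crc ^= *p++;
                for (i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xA001 : 0);
        }
        return crc;
}

static void put_le32(uint8_t out[4], uint32_t v)
{
        out[0] = (uint8_t)v;
        out[1] = (uint8_t)(v >> 8);
        out[2] = (uint8_t)(v >> 16);
        out[3] = (uint8_t)(v >> 24);
}

/* Trimmed-down, illustrative descriptor, not the on-disk ext4_group_desc;
 * its fields are fed to the CRC as raw bytes, so field byte order is
 * glossed over here. */
struct toy_group_desc {
        uint32_t bg_block_bitmap_lo;
        uint32_t bg_inode_bitmap_lo;
        uint32_t bg_inode_table_lo;
        uint16_t bg_free_blocks_count;
        uint16_t bg_free_inodes_count;
        uint16_t bg_used_dirs_count;
        uint16_t bg_flags;
        uint16_t bg_checksum;
};

static uint16_t group_desc_csum(const uint8_t uuid[16], uint32_t group,
                                const struct toy_group_desc *gdp)
{
        uint8_t le_group[4];
        uint16_t crc;

        put_le32(le_group, group);
        crc = crc16(0xffff, uuid, 16);            /* seed with ~0, as above */
        crc = crc16(crc, le_group, sizeof(le_group));
        crc = crc16(crc, gdp, offsetof(struct toy_group_desc, bg_checksum));
        /* a 64-bit-descriptor filesystem would also cover the bytes past
         * bg_checksum here */
        return crc;
}

int main(void)
{
        const uint8_t uuid[16] = { 0 };           /* placeholder fs UUID */
        struct toy_group_desc gd = {
                .bg_free_blocks_count = 31984,
                .bg_free_inodes_count = 8181,
        };

        gd.bg_checksum = group_desc_csum(uuid, 0, &gd);
        printf("group 0 checksum: 0x%04x\n", gd.bg_checksum);

        /* verification is a recompute-and-compare, as in
         * ext4_group_desc_csum_verify() */
        printf("verify: %s\n",
               gd.bg_checksum == group_desc_csum(uuid, 0, &gd) ? "ok" : "bad");
        return 0;
}
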
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index b10d68fffb5..86387302c2a 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -750,12 +750,11 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
}
} else {
/* Allocate a buffer where we construct the new block. */
- s->base = kmalloc(sb->s_blocksize, GFP_KERNEL);
+ s->base = kzalloc(sb->s_blocksize, GFP_KERNEL);
/* assert(header == s->base) */
error = -ENOMEM;
if (s->base == NULL)
goto cleanup;
- memset(s->base, 0, sb->s_blocksize);
header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
header(s->base)->h_blocks = cpu_to_le32(1);
header(s->base)->h_refcount = cpu_to_le32(1);
@@ -1121,7 +1120,7 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
int total_ino, total_blk;
void *base, *start, *end;
int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
- int s_min_extra_isize = EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize;
+ int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
down_write(&EXT4_I(inode)->xattr_sem);
retry:
@@ -1293,7 +1292,7 @@ retry:
i.name = b_entry_name;
i.value = buffer;
- i.value_len = cpu_to_le32(size);
+ i.value_len = size;
error = ext4_xattr_block_find(inode, &i, bs);
if (error)
goto cleanup;