From: Greg Kroah-Hartman Date: Wed, 18 Feb 2009 19:16:02 +0000 (-0800) Subject: .27 ext4 patches X-Git-Tag: v2.6.27.19~5 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=bcc98f182ef93264f5ee6c5711a2d0a6afcf9877;p=thirdparty%2Fkernel%2Fstable-queue.git .27 ext4 patches --- diff --git a/queue-2.6.27/ext4-add-blocks-added-during-resize-to-bitmap.patch b/queue-2.6.27/ext4-add-blocks-added-during-resize-to-bitmap.patch new file mode 100644 index 00000000000..d530ff638f7 --- /dev/null +++ b/queue-2.6.27/ext4-add-blocks-added-during-resize-to-bitmap.patch @@ -0,0 +1,223 @@ +From tytso@mit.edu Wed Feb 18 11:08:52 2009 +From: Aneesh Kumar K.V +Date: Tue, 17 Feb 2009 10:58:28 -0500 +Subject: ext4: Add blocks added during resize to bitmap +To: stable@kernel.org +Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o" , "Aneesh Kumar K.V" +Message-ID: <1234886324-15105-9-git-send-email-tytso@mit.edu> + + +From: Aneesh Kumar K.V + +(cherry picked from commit e21675d4b63975d09eb75c443c48ebe663d23e18) + +With this change new blocks added during resize +are marked as free in the block bitmap and the +group is flagged with EXT4_GROUP_INFO_NEED_INIT_BIT +flag. This make sure when mballoc tries to allocate +blocks from the new group we would reload the +buddy information using the bitmap present in the disk. + +Signed-off-by: Aneesh Kumar K.V +Signed-off-by: "Theodore Ts'o" +Signed-off-by: Greg Kroah-Hartman + +--- + fs/ext4/balloc.c | 126 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ + fs/ext4/ext4.h | 6 +- + fs/ext4/resize.c | 11 ---- + 3 files changed, 132 insertions(+), 11 deletions(-) + +--- a/fs/ext4/balloc.c ++++ b/fs/ext4/balloc.c +@@ -20,6 +20,7 @@ + #include "ext4.h" + #include "ext4_jbd2.h" + #include "group.h" ++#include "mballoc.h" + + /* + * balloc.c contains the blocks allocation and deallocation routines +@@ -837,6 +838,131 @@ error_return: + } + + /** ++ * ext4_add_groupblocks() -- Add given blocks to an existing group ++ * @handle: handle to this transaction ++ * @sb: super block ++ * @block: start physcial block to add to the block group ++ * @count: number of blocks to free ++ * ++ * This marks the blocks as free in the bitmap. We ask the ++ * mballoc to reload the buddy after this by setting group ++ * EXT4_GROUP_INFO_NEED_INIT_BIT flag ++ */ ++void ext4_add_groupblocks(handle_t *handle, struct super_block *sb, ++ ext4_fsblk_t block, unsigned long count) ++{ ++ struct buffer_head *bitmap_bh = NULL; ++ struct buffer_head *gd_bh; ++ ext4_group_t block_group; ++ ext4_grpblk_t bit; ++ unsigned long i; ++ struct ext4_group_desc *desc; ++ struct ext4_super_block *es; ++ struct ext4_sb_info *sbi; ++ int err = 0, ret; ++ ext4_grpblk_t blocks_freed; ++ struct ext4_group_info *grp; ++ ++ sbi = EXT4_SB(sb); ++ es = sbi->s_es; ++ ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); ++ ++ ext4_get_group_no_and_offset(sb, block, &block_group, &bit); ++ /* ++ * Check to see if we are freeing blocks across a group ++ * boundary. 
++ */ ++ if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) ++ goto error_return; ++ ++ bitmap_bh = ext4_read_block_bitmap(sb, block_group); ++ if (!bitmap_bh) ++ goto error_return; ++ desc = ext4_get_group_desc(sb, block_group, &gd_bh); ++ if (!desc) ++ goto error_return; ++ ++ if (in_range(ext4_block_bitmap(sb, desc), block, count) || ++ in_range(ext4_inode_bitmap(sb, desc), block, count) || ++ in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || ++ in_range(block + count - 1, ext4_inode_table(sb, desc), ++ sbi->s_itb_per_group)) { ++ ext4_error(sb, __func__, ++ "Adding blocks in system zones - " ++ "Block = %llu, count = %lu", ++ block, count); ++ goto error_return; ++ } ++ ++ /* ++ * We are about to add blocks to the bitmap, ++ * so we need undo access. ++ */ ++ BUFFER_TRACE(bitmap_bh, "getting undo access"); ++ err = ext4_journal_get_undo_access(handle, bitmap_bh); ++ if (err) ++ goto error_return; ++ ++ /* ++ * We are about to modify some metadata. Call the journal APIs ++ * to unshare ->b_data if a currently-committing transaction is ++ * using it ++ */ ++ BUFFER_TRACE(gd_bh, "get_write_access"); ++ err = ext4_journal_get_write_access(handle, gd_bh); ++ if (err) ++ goto error_return; ++ ++ for (i = 0, blocks_freed = 0; i < count; i++) { ++ BUFFER_TRACE(bitmap_bh, "clear bit"); ++ if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group), ++ bit + i, bitmap_bh->b_data)) { ++ ext4_error(sb, __func__, ++ "bit already cleared for block %llu", ++ (ext4_fsblk_t)(block + i)); ++ BUFFER_TRACE(bitmap_bh, "bit already cleared"); ++ } else { ++ blocks_freed++; ++ } ++ } ++ spin_lock(sb_bgl_lock(sbi, block_group)); ++ le16_add_cpu(&desc->bg_free_blocks_count, blocks_freed); ++ desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc); ++ spin_unlock(sb_bgl_lock(sbi, block_group)); ++ percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed); ++ ++ if (sbi->s_log_groups_per_flex) { ++ ext4_group_t flex_group = ext4_flex_group(sbi, block_group); ++ spin_lock(sb_bgl_lock(sbi, flex_group)); ++ sbi->s_flex_groups[flex_group].free_blocks += blocks_freed; ++ spin_unlock(sb_bgl_lock(sbi, flex_group)); ++ } ++ ++ /* We dirtied the bitmap block */ ++ BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); ++ err = ext4_journal_dirty_metadata(handle, bitmap_bh); ++ ++ /* And the group descriptor block */ ++ BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); ++ ret = ext4_journal_dirty_metadata(handle, gd_bh); ++ if (!err) ++ err = ret; ++ sb->s_dirt = 1; ++ /* ++ * request to reload the buddy with the ++ * new bitmap information ++ */ ++ grp = ext4_get_group_info(sb, block_group); ++ set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); ++ ext4_mb_update_group_info(grp, blocks_freed); ++ ++error_return: ++ brelse(bitmap_bh); ++ ext4_std_error(sb, err); ++ return; ++} ++ ++/** + * ext4_free_blocks() -- Free given blocks and update quota + * @handle: handle for this transaction + * @inode: inode +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -991,9 +991,11 @@ extern ext4_fsblk_t ext4_has_free_blocks + ext4_fsblk_t nblocks); + extern void ext4_free_blocks (handle_t *handle, struct inode *inode, + ext4_fsblk_t block, unsigned long count, int metadata); +-extern void ext4_free_blocks_sb (handle_t *handle, struct super_block *sb, +- ext4_fsblk_t block, unsigned long count, ++extern void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb, ++ ext4_fsblk_t block, unsigned long count, + unsigned long *pdquot_freed_blocks); ++extern void ext4_add_groupblocks(handle_t *handle, struct 
super_block *sb, ++ ext4_fsblk_t block, unsigned long count); + extern ext4_fsblk_t ext4_count_free_blocks (struct super_block *); + extern void ext4_check_blocks_bitmap (struct super_block *); + extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb, +--- a/fs/ext4/resize.c ++++ b/fs/ext4/resize.c +@@ -976,9 +976,7 @@ int ext4_group_extend(struct super_block + struct buffer_head * bh; + handle_t *handle; + int err; +- unsigned long freed_blocks; + ext4_group_t group; +- struct ext4_group_info *grp; + + /* We don't need to worry about locking wrt other resizers just + * yet: we're going to revalidate es->s_blocks_count after +@@ -1077,7 +1075,8 @@ int ext4_group_extend(struct super_block + unlock_super(sb); + ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count, + o_blocks_count + add); +- ext4_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks); ++ /* We add the blocks to the bitmap and set the group need init bit */ ++ ext4_add_groupblocks(handle, sb, o_blocks_count, add); + ext4_debug("freed blocks %llu through %llu\n", o_blocks_count, + o_blocks_count + add); + if ((err = ext4_journal_stop(handle))) +@@ -1113,12 +1112,6 @@ int ext4_group_extend(struct super_block + ClearPageUptodate(page); + page_cache_release(page); + } +- +- /* Get the info on the last group */ +- grp = ext4_get_group_info(sb, group); +- +- /* Update free blocks in group info */ +- ext4_mb_update_group_info(grp, add); + } + + if (test_opt(sb, DEBUG)) diff --git a/queue-2.6.27/ext4-add-sanity-check-to-make_indexed_dir.patch b/queue-2.6.27/ext4-add-sanity-check-to-make_indexed_dir.patch new file mode 100644 index 00000000000..aeacb909981 --- /dev/null +++ b/queue-2.6.27/ext4-add-sanity-check-to-make_indexed_dir.patch @@ -0,0 +1,71 @@ +From tytso@mit.edu Wed Feb 18 11:13:59 2009 +From: "Theodore Ts'o" +Date: Tue, 17 Feb 2009 10:58:42 -0500 +Subject: ext4: Add sanity check to make_indexed_dir +To: stable@kernel.org +Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o" +Message-ID: <1234886324-15105-23-git-send-email-tytso@mit.edu> + +From: "Theodore Ts'o" + +(cherry picked from commit e6b8bc09ba2075cd91fbffefcd2778b1a00bd76f) + +Make sure the rec_len field in the '..' entry is sane, lest we overrun +the directory block and cause a kernel oops on a purposefully +corrupted filesystem. + +Thanks to Sami Liedes for reporting this bug. + +http://bugzilla.kernel.org/show_bug.cgi?id=12430 + +Signed-off-by: "Theodore Ts'o" +Signed-off-by: Greg Kroah-Hartman + +--- + fs/ext4/namei.c | 21 +++++++++++++++------ + 1 file changed, 15 insertions(+), 6 deletions(-) + +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -1382,7 +1382,7 @@ static int make_indexed_dir(handle_t *ha + struct fake_dirent *fde; + + blocksize = dir->i_sb->s_blocksize; +- dxtrace(printk("Creating index\n")); ++ dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino)); + retval = ext4_journal_get_write_access(handle, bh); + if (retval) { + ext4_std_error(dir->i_sb, retval); +@@ -1391,6 +1391,20 @@ static int make_indexed_dir(handle_t *ha + } + root = (struct dx_root *) bh->b_data; + ++ /* The 0th block becomes the root, move the dirents out */ ++ fde = &root->dotdot; ++ de = (struct ext4_dir_entry_2 *)((char *)fde + ++ ext4_rec_len_from_disk(fde->rec_len)); ++ if ((char *) de >= (((char *) root) + blocksize)) { ++ ext4_error(dir->i_sb, __func__, ++ "invalid rec_len for '..' 
in inode %lu", ++ dir->i_ino); ++ brelse(bh); ++ return -EIO; ++ } ++ len = ((char *) root) + blocksize - (char *) de; ++ ++ /* Allocate new block for the 0th block's dirents */ + bh2 = ext4_append (handle, dir, &block, &retval); + if (!(bh2)) { + brelse(bh); +@@ -1399,11 +1413,6 @@ static int make_indexed_dir(handle_t *ha + EXT4_I(dir)->i_flags |= EXT4_INDEX_FL; + data1 = bh2->b_data; + +- /* The 0th block becomes the root, move the dirents out */ +- fde = &root->dotdot; +- de = (struct ext4_dir_entry_2 *)((char *)fde + +- ext4_rec_len_from_disk(fde->rec_len)); +- len = ((char *) root) + blocksize - (char *) de; + memcpy (data1, de, len); + de = (struct ext4_dir_entry_2 *) data1; + top = data1 + len; diff --git a/queue-2.6.27/ext4-add-sanity-checks-for-the-superblock-before-mounting-the-filesystem.patch b/queue-2.6.27/ext4-add-sanity-checks-for-the-superblock-before-mounting-the-filesystem.patch new file mode 100644 index 00000000000..59de2cdd420 --- /dev/null +++ b/queue-2.6.27/ext4-add-sanity-checks-for-the-superblock-before-mounting-the-filesystem.patch @@ -0,0 +1,81 @@ +From tytso@mit.edu Wed Feb 18 11:13:18 2009 +From: "Theodore Ts'o" +Date: Tue, 17 Feb 2009 10:58:40 -0500 +Subject: ext4: Add sanity checks for the superblock before mounting the filesystem +To: stable@kernel.org +Cc: Thiemo Nagel , linux-ext4@vger.kernel.org, "Theodore Ts'o" +Message-ID: <1234886324-15105-21-git-send-email-tytso@mit.edu> + +From: "Theodore Ts'o" + +(cherry picked from commit 4ec110281379826c5cf6ed14735e47027c3c5765) + +This avoids insane superblock configurations that could lead to kernel +oops due to null pointer derefences. + +http://bugzilla.kernel.org/show_bug.cgi?id=12371 + +Thanks to David Maciejak at Fortinet's FortiGuard Global Security +Research Team who discovered this bug independently (but at +approximately the same time) as Thiemo Nagel, who submitted the patch. + +Signed-off-by: Thiemo Nagel +Signed-off-by: "Theodore Ts'o" +Signed-off-by: Greg Kroah-Hartman + +--- + fs/ext4/super.c | 30 ++++++++++++++++++++---------- + 1 file changed, 20 insertions(+), 10 deletions(-) + +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -1916,8 +1916,8 @@ static int ext4_fill_super(struct super_ + struct inode *root; + int ret = -EINVAL; + int blocksize; +- int db_count; +- int i; ++ unsigned int db_count; ++ unsigned int i; + int needs_recovery; + __le32 features; + __u64 blocks_count; +@@ -2207,20 +2207,30 @@ static int ext4_fill_super(struct super_ + if (EXT4_BLOCKS_PER_GROUP(sb) == 0) + goto cantfind_ext4; + +- /* ensure blocks_count calculation below doesn't sign-extend */ +- if (ext4_blocks_count(es) + EXT4_BLOCKS_PER_GROUP(sb) < +- le32_to_cpu(es->s_first_data_block) + 1) { +- printk(KERN_WARNING "EXT4-fs: bad geometry: block count %llu, " +- "first data block %u, blocks per group %lu\n", +- ext4_blocks_count(es), +- le32_to_cpu(es->s_first_data_block), +- EXT4_BLOCKS_PER_GROUP(sb)); ++ /* ++ * It makes no sense for the first data block to be beyond the end ++ * of the filesystem. 
++ */ ++ if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) { ++ printk(KERN_WARNING "EXT4-fs: bad geometry: first data" ++ "block %u is beyond end of filesystem (%llu)\n", ++ le32_to_cpu(es->s_first_data_block), ++ ext4_blocks_count(es)); + goto failed_mount; + } + blocks_count = (ext4_blocks_count(es) - + le32_to_cpu(es->s_first_data_block) + + EXT4_BLOCKS_PER_GROUP(sb) - 1); + do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb)); ++ if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) { ++ printk(KERN_WARNING "EXT4-fs: groups count too large: %u " ++ "(block count %llu, first data block %u, " ++ "blocks per group %lu)\n", sbi->s_groups_count, ++ ext4_blocks_count(es), ++ le32_to_cpu(es->s_first_data_block), ++ EXT4_BLOCKS_PER_GROUP(sb)); ++ goto failed_mount; ++ } + sbi->s_groups_count = blocks_count; + db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / + EXT4_DESC_PER_BLOCK(sb); diff --git a/queue-2.6.27/ext4-add-support-for-non-native-signed-unsigned-htree-hash-algorithms.patch b/queue-2.6.27/ext4-add-support-for-non-native-signed-unsigned-htree-hash-algorithms.patch new file mode 100644 index 00000000000..e1357f84ec8 --- /dev/null +++ b/queue-2.6.27/ext4-add-support-for-non-native-signed-unsigned-htree-hash-algorithms.patch @@ -0,0 +1,235 @@ +From tytso@mit.edu Wed Feb 18 10:59:39 2009 +From: "Theodore Ts'o" +Date: Tue, 17 Feb 2009 10:58:21 -0500 +Subject: ext4: Add support for non-native signed/unsigned htree hash algorithms +To: stable@kernel.org +Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o" +Message-ID: <1234886324-15105-2-git-send-email-tytso@mit.edu> + +From: "Theodore Ts'o" + +(cherry picked from commit f99b25897a86fcfff9140396a97261ae65fed872) + +The original ext3 hash algorithms assumed that variables of type char +were signed, as God and K&R intended. Unfortunately, this assumption +is not true on some architectures. Userspace support for marking +filesystems with non-native signed/unsigned chars was added two years +ago, but the kernel-side support was never added (until now). 
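[Editorial aside, not part of the patch: a minimal user-space sketch of why the legacy hash is architecture-dependent. It mirrors the two dx_hack_hash variants added by the hash.c hunks below; the only difference is the pointer type, yet any filename byte >= 0x80 sign-extends differently, so the same name lands in a different htree chain depending on whether the platform's plain char is signed.]

	#include <stdio.h>

	typedef unsigned int u32;

	static u32 dx_hack_hash_signed(const char *name, int len)
	{
		u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
		const signed char *scp = (const signed char *) name;

		while (len--) {
			hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373));
			if (hash & 0x80000000)
				hash -= 0x7fffffff;
			hash1 = hash0;
			hash0 = hash;
		}
		return hash0 << 1;
	}

	static u32 dx_hack_hash_unsigned(const char *name, int len)
	{
		u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
		const unsigned char *ucp = (const unsigned char *) name;

		while (len--) {
			hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373));
			if (hash & 0x80000000)
				hash -= 0x7fffffff;
			hash1 = hash0;
			hash0 = hash;
		}
		return hash0 << 1;
	}

	int main(void)
	{
		/* "café" in UTF-8: the bytes 0xc3 0xa9 have the high bit set */
		const char name[] = "caf\xc3\xa9";
		int len = sizeof(name) - 1;

		/* ASCII-only names hash identically; high-bit bytes do not */
		printf("signed:   %08x\n", dx_hack_hash_signed(name, len));
		printf("unsigned: %08x\n", dx_hack_hash_unsigned(name, len));
		return 0;
	}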
+ +Signed-off-by: "Theodore Ts'o" +Signed-off-by: Greg Kroah-Hartman + +--- + fs/ext4/ext4.h | 3 ++ + fs/ext4/ext4_sb.h | 1 + fs/ext4/hash.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++-------- + fs/ext4/namei.c | 7 ++++ + fs/ext4/super.c | 12 ++++++++ + 5 files changed, 90 insertions(+), 10 deletions(-) + +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -889,6 +889,9 @@ static inline __le16 ext4_rec_len_to_dis + #define DX_HASH_LEGACY 0 + #define DX_HASH_HALF_MD4 1 + #define DX_HASH_TEA 2 ++#define DX_HASH_LEGACY_UNSIGNED 3 ++#define DX_HASH_HALF_MD4_UNSIGNED 4 ++#define DX_HASH_TEA_UNSIGNED 5 + + #ifdef __KERNEL__ + +--- a/fs/ext4/ext4_sb.h ++++ b/fs/ext4/ext4_sb.h +@@ -56,6 +56,7 @@ struct ext4_sb_info { + u32 s_next_generation; + u32 s_hash_seed[4]; + int s_def_hash_version; ++ int s_hash_unsigned; /* 3 if hash should be signed, 0 if not */ + struct percpu_counter s_freeblocks_counter; + struct percpu_counter s_freeinodes_counter; + struct percpu_counter s_dirs_counter; +--- a/fs/ext4/hash.c ++++ b/fs/ext4/hash.c +@@ -35,23 +35,71 @@ static void TEA_transform(__u32 buf[4], + + + /* The old legacy hash */ +-static __u32 dx_hack_hash (const char *name, int len) ++static __u32 dx_hack_hash_unsigned(const char *name, int len) + { +- __u32 hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; ++ __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; ++ const unsigned char *ucp = (const unsigned char *) name; ++ ++ while (len--) { ++ hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373)); ++ ++ if (hash & 0x80000000) ++ hash -= 0x7fffffff; ++ hash1 = hash0; ++ hash0 = hash; ++ } ++ return hash0 << 1; ++} ++ ++static __u32 dx_hack_hash_signed(const char *name, int len) ++{ ++ __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; ++ const signed char *scp = (const signed char *) name; ++ + while (len--) { +- __u32 hash = hash1 + (hash0 ^ (*name++ * 7152373)); ++ hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373)); + +- if (hash & 0x80000000) hash -= 0x7fffffff; ++ if (hash & 0x80000000) ++ hash -= 0x7fffffff; + hash1 = hash0; + hash0 = hash; + } +- return (hash0 << 1); ++ return hash0 << 1; + } + +-static void str2hashbuf(const char *msg, int len, __u32 *buf, int num) ++static void str2hashbuf_signed(const char *msg, int len, __u32 *buf, int num) + { + __u32 pad, val; + int i; ++ const signed char *scp = (const signed char *) msg; ++ ++ pad = (__u32)len | ((__u32)len << 8); ++ pad |= pad << 16; ++ ++ val = pad; ++ if (len > num*4) ++ len = num * 4; ++ for (i = 0; i < len; i++) { ++ if ((i % 4) == 0) ++ val = pad; ++ val = ((int) scp[i]) + (val << 8); ++ if ((i % 4) == 3) { ++ *buf++ = val; ++ val = pad; ++ num--; ++ } ++ } ++ if (--num >= 0) ++ *buf++ = val; ++ while (--num >= 0) ++ *buf++ = pad; ++} ++ ++static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num) ++{ ++ __u32 pad, val; ++ int i; ++ const unsigned char *ucp = (const unsigned char *) msg; + + pad = (__u32)len | ((__u32)len << 8); + pad |= pad << 16; +@@ -62,7 +110,7 @@ static void str2hashbuf(const char *msg, + for (i=0; i < len; i++) { + if ((i % 4) == 0) + val = pad; +- val = msg[i] + (val << 8); ++ val = ((int) ucp[i]) + (val << 8); + if ((i % 4) == 3) { + *buf++ = val; + val = pad; +@@ -95,6 +143,8 @@ int ext4fs_dirhash(const char *name, int + const char *p; + int i; + __u32 in[8], buf[4]; ++ void (*str2hashbuf)(const char *, int, __u32 *, int) = ++ str2hashbuf_signed; + + /* Initialize the default seed for the hash checksum functions */ + buf[0] = 0x67452301; +@@ -113,13 +163,18 @@ int ext4fs_dirhash(const char *name, 
int + } + + switch (hinfo->hash_version) { ++ case DX_HASH_LEGACY_UNSIGNED: ++ hash = dx_hack_hash_unsigned(name, len); ++ break; + case DX_HASH_LEGACY: +- hash = dx_hack_hash(name, len); ++ hash = dx_hack_hash_signed(name, len); + break; ++ case DX_HASH_HALF_MD4_UNSIGNED: ++ str2hashbuf = str2hashbuf_unsigned; + case DX_HASH_HALF_MD4: + p = name; + while (len > 0) { +- str2hashbuf(p, len, in, 8); ++ (*str2hashbuf)(p, len, in, 8); + half_md4_transform(buf, in); + len -= 32; + p += 32; +@@ -127,10 +182,12 @@ int ext4fs_dirhash(const char *name, int + minor_hash = buf[2]; + hash = buf[1]; + break; ++ case DX_HASH_TEA_UNSIGNED: ++ str2hashbuf = str2hashbuf_unsigned; + case DX_HASH_TEA: + p = name; + while (len > 0) { +- str2hashbuf(p, len, in, 4); ++ (*str2hashbuf)(p, len, in, 4); + TEA_transform(buf, in); + len -= 16; + p += 16; +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -371,6 +371,8 @@ dx_probe(struct dentry *dentry, struct i + goto fail; + } + hinfo->hash_version = root->info.hash_version; ++ if (hinfo->hash_version <= DX_HASH_TEA) ++ hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; + hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed; + if (dentry) + ext4fs_dirhash(dentry->d_name.name, dentry->d_name.len, hinfo); +@@ -640,6 +642,9 @@ int ext4_htree_fill_tree(struct file *di + dir = dir_file->f_path.dentry->d_inode; + if (!(EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) { + hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version; ++ if (hinfo.hash_version <= DX_HASH_TEA) ++ hinfo.hash_version += ++ EXT4_SB(dir->i_sb)->s_hash_unsigned; + hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; + count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo, + start_hash, start_minor_hash); +@@ -1418,6 +1423,8 @@ static int make_indexed_dir(handle_t *ha + + /* Initialize as for dx_probe */ + hinfo.hash_version = root->info.hash_version; ++ if (hinfo.hash_version <= DX_HASH_TEA) ++ hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; + hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; + ext4fs_dirhash(name, namelen, &hinfo); + frame = frames; +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -2172,6 +2172,18 @@ static int ext4_fill_super(struct super_ + for (i = 0; i < 4; i++) + sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); + sbi->s_def_hash_version = es->s_def_hash_version; ++ i = le32_to_cpu(es->s_flags); ++ if (i & EXT2_FLAGS_UNSIGNED_HASH) ++ sbi->s_hash_unsigned = 3; ++ else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { ++#ifdef __CHAR_UNSIGNED__ ++ es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); ++ sbi->s_hash_unsigned = 3; ++#else ++ es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); ++#endif ++ sb->s_dirt = 1; ++ } + + if (sbi->s_blocks_per_group > blocksize * 8) { + printk(KERN_ERR diff --git a/queue-2.6.27/ext4-avoid-ext4_error-when-mounting-a-fs-with-a-single-bg.patch b/queue-2.6.27/ext4-avoid-ext4_error-when-mounting-a-fs-with-a-single-bg.patch new file mode 100644 index 00000000000..54b1f7cc7a5 --- /dev/null +++ b/queue-2.6.27/ext4-avoid-ext4_error-when-mounting-a-fs-with-a-single-bg.patch @@ -0,0 +1,45 @@ +From tytso@mit.edu Wed Feb 18 11:07:20 2009 +From: Aneesh Kumar K.V +Date: Tue, 17 Feb 2009 10:58:24 -0500 +Subject: ext4: avoid ext4_error when mounting a fs with a single bg +To: stable@kernel.org +Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o" , "Aneesh Kumar K.V" +Message-ID: <1234886324-15105-5-git-send-email-tytso@mit.edu> + + +From: Aneesh Kumar K.V + +(cherry picked from commit 565a9617b2151e21b22700e97a8b04e70e103153) + +Remove some completely unneeded code 
which which caused an ext4_error +to be generated when mounting a file system with only a single block +group. + +Signed-off-by: Aneesh Kumar K.V +Signed-off-by: "Theodore Ts'o" +Signed-off-by: Greg Kroah-Hartman + +--- + fs/ext4/super.c | 4 ---- + 1 file changed, 4 deletions(-) + +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -1493,7 +1493,6 @@ static int ext4_fill_flex_info(struct su + ext4_group_t flex_group_count; + ext4_group_t flex_group; + int groups_per_flex = 0; +- __u64 block_bitmap = 0; + int i; + + if (!sbi->s_es->s_log_groups_per_flex) { +@@ -1516,9 +1515,6 @@ static int ext4_fill_flex_info(struct su + goto failed; + } + +- gdp = ext4_get_group_desc(sb, 1, &bh); +- block_bitmap = ext4_block_bitmap(sb, gdp) - 1; +- + for (i = 0; i < sbi->s_groups_count; i++) { + gdp = ext4_get_group_desc(sb, i, &bh); + diff --git a/queue-2.6.27/ext4-cleanup-mballoc-header-files.patch b/queue-2.6.27/ext4-cleanup-mballoc-header-files.patch new file mode 100644 index 00000000000..260b3471bd0 --- /dev/null +++ b/queue-2.6.27/ext4-cleanup-mballoc-header-files.patch @@ -0,0 +1,129 @@ +From tytso@mit.edu Wed Feb 18 11:09:37 2009 +From: Aneesh Kumar K.V +Date: Tue, 17 Feb 2009 10:58:30 -0500 +Subject: ext4: cleanup mballoc header files +To: stable@kernel.org +Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o" , "Aneesh Kumar K.V" +Message-ID: <1234886324-15105-11-git-send-email-tytso@mit.edu> + + +From: Aneesh Kumar K.V + +(cherry picked from commit c3a326a657562dab81acf05aee106dc1fe345eb4) + +Move some of the forward declaration of the static functions +to mballoc.c where they are used. This enables us to include +mballoc.h in other .c files. Also correct the buddy cache +documentation. + +Signed-off-by: Aneesh Kumar K.V +Signed-off-by: "Theodore Ts'o" +Signed-off-by: Greg Kroah-Hartman + +--- + fs/ext4/mballoc.c | 22 ++++++++++++++++++---- + fs/ext4/mballoc.h | 20 +------------------- + 2 files changed, 19 insertions(+), 23 deletions(-) + +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -100,7 +100,7 @@ + * inode as: + * + * { page } +- * [ group 0 buddy][ group 0 bitmap] [group 1][ group 1]... ++ * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... + * + * + * one block each for bitmap and buddy information. So for each group we +@@ -330,6 +330,15 @@ + * object + * + */ ++static struct kmem_cache *ext4_pspace_cachep; ++static struct kmem_cache *ext4_ac_cachep; ++static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, ++ ext4_group_t group); ++static int ext4_mb_init_per_dev_proc(struct super_block *sb); ++static int ext4_mb_destroy_per_dev_proc(struct super_block *sb); ++static void ext4_mb_free_committed_blocks(struct super_block *); ++static void ext4_mb_poll_new_transaction(struct super_block *sb, ++ handle_t *handle); + + static inline void *mb_correct_addr_and_bit(int *bit, void *addr) + { +@@ -718,7 +727,7 @@ static void ext4_mb_generate_buddy(struc + * stored in the inode as + * + * { page } +- * [ group 0 buddy][ group 0 bitmap] [group 1][ group 1]... ++ * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... + * + * + * one block each for bitmap and buddy information. +@@ -1320,8 +1329,13 @@ static void ext4_mb_use_best_found(struc + ac->ac_tail = ret & 0xffff; + ac->ac_buddy = ret >> 16; + +- /* XXXXXXX: SUCH A HORRIBLE **CK */ +- /*FIXME!! Why ? */ ++ /* ++ * take the page reference. We want the page to be pinned ++ * so that we don't get a ext4_mb_init_cache_call for this ++ * group until we update the bitmap. 
That would mean we ++ * double allocate blocks. The reference is dropped ++ * in ext4_mb_release_context ++ */ + ac->ac_bitmap_page = e4b->bd_bitmap_page; + get_page(ac->ac_bitmap_page); + ac->ac_buddy_page = e4b->bd_buddy_page; +--- a/fs/ext4/mballoc.h ++++ b/fs/ext4/mballoc.h +@@ -97,9 +97,6 @@ + */ + #define MB_DEFAULT_GROUP_PREALLOC 512 + +-static struct kmem_cache *ext4_pspace_cachep; +-static struct kmem_cache *ext4_ac_cachep; +- + #ifdef EXT4_BB_MAX_BLOCKS + #undef EXT4_BB_MAX_BLOCKS + #endif +@@ -254,8 +251,6 @@ static inline void ext4_mb_store_history + { + return; + } +-#else +-static void ext4_mb_store_history(struct ext4_allocation_context *ac); + #endif + + #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) +@@ -263,19 +258,6 @@ static void ext4_mb_store_history(struct + static struct proc_dir_entry *proc_root_ext4; + struct buffer_head *read_block_bitmap(struct super_block *, ext4_group_t); + +-static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, +- ext4_group_t group); +-static void ext4_mb_poll_new_transaction(struct super_block *, handle_t *); +-static void ext4_mb_free_committed_blocks(struct super_block *); +-static void ext4_mb_return_to_preallocation(struct inode *inode, +- struct ext4_buddy *e4b, sector_t block, +- int count); +-static void ext4_mb_put_pa(struct ext4_allocation_context *, +- struct super_block *, struct ext4_prealloc_space *pa); +-static int ext4_mb_init_per_dev_proc(struct super_block *sb); +-static int ext4_mb_destroy_per_dev_proc(struct super_block *sb); +- +- + static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group) + { + struct ext4_group_info *grinfo = ext4_get_group_info(sb, group); +@@ -300,7 +282,7 @@ static inline int ext4_is_group_locked(s + &(grinfo->bb_state)); + } + +-static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb, ++static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb, + struct ext4_free_extent *fex) + { + ext4_fsblk_t block; diff --git a/queue-2.6.27/ext4-don-t-allow-new-groups-to-be-added-during-block-allocation.patch b/queue-2.6.27/ext4-don-t-allow-new-groups-to-be-added-during-block-allocation.patch new file mode 100644 index 00000000000..1cf6cd75833 --- /dev/null +++ b/queue-2.6.27/ext4-don-t-allow-new-groups-to-be-added-during-block-allocation.patch @@ -0,0 +1,105 @@ +From tytso@mit.edu Wed Feb 18 11:12:34 2009 +From: Aneesh Kumar K.V +Date: Tue, 17 Feb 2009 10:58:38 -0500 +Subject: ext4: Don't allow new groups to be added during block allocation +To: stable@kernel.org +Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o" , "Aneesh Kumar K.V" +Message-ID: <1234886324-15105-19-git-send-email-tytso@mit.edu> + +From: Aneesh Kumar K.V + +(cherry picked from commit 8556e8f3b6c4c11601ce1e9ea8090a6d8bd5daae) + +After we mark the blocks in the buddy cache as allocated, +we need to ensure that we don't reinit the buddy cache until +the block bitmap is updated. 
This commit achieves this by holding +the group_info alloc_semaphore till ext4_mb_release_context + +Signed-off-by: Aneesh Kumar K.V +Signed-off-by: "Theodore Ts'o" +Signed-off-by: Greg Kroah-Hartman + +--- + fs/ext4/mballoc.c | 18 ++++++++++++++---- + fs/ext4/mballoc.h | 5 +++++ + 2 files changed, 19 insertions(+), 4 deletions(-) + +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -1054,7 +1054,8 @@ static void ext4_mb_release_desc(struct + if (e4b->bd_buddy_page) + page_cache_release(e4b->bd_buddy_page); + /* Done with the buddy cache */ +- up_read(e4b->alloc_semp); ++ if (e4b->alloc_semp) ++ up_read(e4b->alloc_semp); + } + + +@@ -1374,7 +1375,9 @@ static void ext4_mb_use_best_found(struc + get_page(ac->ac_bitmap_page); + ac->ac_buddy_page = e4b->bd_buddy_page; + get_page(ac->ac_buddy_page); +- ++ /* on allocation we use ac to track the held semaphore */ ++ ac->alloc_semp = e4b->alloc_semp; ++ e4b->alloc_semp = NULL; + /* store last allocated for subsequent stream allocation */ + if ((ac->ac_flags & EXT4_MB_HINT_DATA)) { + spin_lock(&sbi->s_md_lock); +@@ -3148,7 +3151,7 @@ ext4_mb_mark_diskspace_used(struct ext4_ + in_range(block + len - 1, ext4_inode_table(sb, gdp), + EXT4_SB(sb)->s_itb_per_group)) { + ext4_error(sb, __func__, +- "Allocating block %llu in system zone of %d group\n", ++ "Allocating block %llu in system zone of %lu group\n", + block, ac->ac_b_ex.fe_group); + /* File system mounted not to panic on error + * Fix the bitmap and repeat the block allocation +@@ -4399,6 +4402,7 @@ ext4_mb_initialize_context(struct ext4_a + ac->ac_pa = NULL; + ac->ac_bitmap_page = NULL; + ac->ac_buddy_page = NULL; ++ ac->alloc_semp = NULL; + ac->ac_lg = NULL; + + /* we have to define context: we'll we work with a file or +@@ -4579,6 +4583,8 @@ static int ext4_mb_release_context(struc + } + ext4_mb_put_pa(ac, ac->ac_sb, pa); + } ++ if (ac->alloc_semp) ++ up_read(ac->alloc_semp); + if (ac->ac_bitmap_page) + page_cache_release(ac->ac_bitmap_page); + if (ac->ac_buddy_page) +@@ -4682,10 +4688,14 @@ repeat: + ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) + ext4_mb_new_preallocation(ac); + } +- + if (likely(ac->ac_status == AC_STATUS_FOUND)) { + *errp = ext4_mb_mark_diskspace_used(ac, handle); + if (*errp == -EAGAIN) { ++ /* ++ * drop the reference that we took ++ * in ext4_mb_use_best_found ++ */ ++ ext4_mb_release_context(ac); + ac->ac_b_ex.fe_group = 0; + ac->ac_b_ex.fe_start = 0; + ac->ac_b_ex.fe_len = 0; +--- a/fs/ext4/mballoc.h ++++ b/fs/ext4/mballoc.h +@@ -213,6 +213,11 @@ struct ext4_allocation_context { + __u8 ac_op; /* operation, for history only */ + struct page *ac_bitmap_page; + struct page *ac_buddy_page; ++ /* ++ * pointer to the held semaphore upon successful ++ * block allocation ++ */ ++ struct rw_semaphore *alloc_semp; + struct ext4_prealloc_space *ac_pa; + struct ext4_locality_group *ac_lg; + }; diff --git a/queue-2.6.27/ext4-don-t-overwrite-allocation_context-ac_status.patch b/queue-2.6.27/ext4-don-t-overwrite-allocation_context-ac_status.patch new file mode 100644 index 00000000000..cf0812735b7 --- /dev/null +++ b/queue-2.6.27/ext4-don-t-overwrite-allocation_context-ac_status.patch @@ -0,0 +1,53 @@ +From tytso@mit.edu Wed Feb 18 11:08:29 2009 +From: Aneesh Kumar K.V +Date: Tue, 17 Feb 2009 10:58:27 -0500 +Subject: ext4: Don't overwrite allocation_context ac_status +To: stable@kernel.org +Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o" , "Aneesh Kumar K.V" +Message-ID: <1234886324-15105-8-git-send-email-tytso@mit.edu> + + +From: Aneesh Kumar K.V + +(cherry picked from commit 
032115fcef837a00336ddf7bda584e89789ea498) + +We can call ext4_mb_check_limits even after successfully allocating +the requested blocks. In that case, make sure we don't overwrite +ac_status if it already has the status AC_STATUS_FOUND. This fixes +the lockdep warning: + +============================================= +[ INFO: possible recursive locking detected ] +2.6.28-rc6-autokern1 #1 +--------------------------------------------- +fsstress/11948 is trying to acquire lock: + (&meta_group_info[i]->alloc_sem){----}, at: [] ext4_mb_load_buddy+0x9f/0x278 +..... + +stack backtrace: +..... + [] ext4_mb_regular_allocator+0xbb5/0xd44 +..... + +but task is already holding lock: + (&meta_group_info[i]->alloc_sem){----}, at: [] ext4_mb_load_buddy+0x9f/0x278 + +Signed-off-by: Aneesh Kumar K.V +Signed-off-by: "Theodore Ts'o" +Signed-off-by: Greg Kroah-Hartman + +--- + fs/ext4/mballoc.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -1326,6 +1326,8 @@ static void ext4_mb_check_limits(struct + struct ext4_free_extent ex; + int max; + ++ if (ac->ac_status == AC_STATUS_FOUND) ++ return; + /* + * We don't want to scan for a whole year + */ diff --git a/queue-2.6.27/ext4-don-t-use-blocks-freed-but-not-yet-committed-in-buddy-cache-init.patch b/queue-2.6.27/ext4-don-t-use-blocks-freed-but-not-yet-committed-in-buddy-cache-init.patch new file mode 100644 index 00000000000..b1185918473 --- /dev/null +++ b/queue-2.6.27/ext4-don-t-use-blocks-freed-but-not-yet-committed-in-buddy-cache-init.patch @@ -0,0 +1,189 @@ +From tytso@mit.edu Wed Feb 18 11:10:17 2009 +From: Aneesh Kumar K.V +Date: Tue, 17 Feb 2009 10:58:32 -0500 +Subject: ext4: don't use blocks freed but not yet committed in buddy cache init +To: stable@kernel.org +Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o" , "Aneesh Kumar K.V" +Message-ID: <1234886324-15105-13-git-send-email-tytso@mit.edu> + + +From: Aneesh Kumar K.V + +(cherry picked from commit 7a2fcbf7f85737735fd44eb34b62315bccf6d6e4) + +When we generate buddy cache (especially during resize) we need to +make sure we don't use the blocks freed but not yet comitted. This +makes sure we have the right value of free blocks count in the group +info and also in the bitmap. 
This also ensures the ordered mode +consistency + +Signed-off-by: Aneesh Kumar K.V +Signed-off-by: "Theodore Ts'o" +Signed-off-by: Greg Kroah-Hartman + +--- + fs/ext4/mballoc.c | 82 +++++++++++++++++++++++++++++++++++++++--------------- + 1 file changed, 60 insertions(+), 22 deletions(-) + +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -335,6 +335,8 @@ static struct kmem_cache *ext4_ac_cachep + static struct kmem_cache *ext4_free_ext_cachep; + static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, + ext4_group_t group); ++static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, ++ ext4_group_t group); + static int ext4_mb_init_per_dev_proc(struct super_block *sb); + static int ext4_mb_destroy_per_dev_proc(struct super_block *sb); + static void ext4_mb_free_committed_blocks(struct super_block *); +@@ -858,7 +860,9 @@ static int ext4_mb_init_cache(struct pag + /* + * incore got set to the group block bitmap below + */ ++ ext4_lock_group(sb, group); + ext4_mb_generate_buddy(sb, data, incore, group); ++ ext4_unlock_group(sb, group); + incore = NULL; + } else { + /* this is block of bitmap */ +@@ -872,6 +876,7 @@ static int ext4_mb_init_cache(struct pag + + /* mark all preallocated blks used in in-core bitmap */ + ext4_mb_generate_from_pa(sb, data, group); ++ ext4_mb_generate_from_freelist(sb, data, group); + ext4_unlock_group(sb, group); + + /* set incore so that the buddy information can be +@@ -3579,6 +3584,32 @@ ext4_mb_use_preallocated(struct ext4_all + } + + /* ++ * the function goes through all block freed in the group ++ * but not yet committed and marks them used in in-core bitmap. ++ * buddy must be generated from this bitmap ++ * Need to be called with ext4 group lock (ext4_lock_group) ++ */ ++static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, ++ ext4_group_t group) ++{ ++ struct rb_node *n; ++ struct ext4_group_info *grp; ++ struct ext4_free_data *entry; ++ ++ grp = ext4_get_group_info(sb, group); ++ n = rb_first(&(grp->bb_free_root)); ++ ++ while (n) { ++ entry = rb_entry(n, struct ext4_free_data, node); ++ mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group), ++ bitmap, entry->start_blk, ++ entry->count); ++ n = rb_next(n); ++ } ++ return; ++} ++ ++/* + * the function goes through all preallocation in this group and marks them + * used in in-core bitmap. buddy must be generated from this bitmap + * Need to be called with ext4 group lock (ext4_lock_group) +@@ -4709,27 +4740,22 @@ static int can_merge(struct ext4_free_da + + static noinline_for_stack int + ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, +- ext4_group_t group, ext4_grpblk_t block, int count) ++ struct ext4_free_data *new_entry) + { ++ ext4_grpblk_t block; ++ struct ext4_free_data *entry; + struct ext4_group_info *db = e4b->bd_info; + struct super_block *sb = e4b->bd_sb; + struct ext4_sb_info *sbi = EXT4_SB(sb); +- struct ext4_free_data *entry, *new_entry; + struct rb_node **n = &db->bb_free_root.rb_node, *node; + struct rb_node *parent = NULL, *new_node; + +- + BUG_ON(e4b->bd_bitmap_page == NULL); + BUG_ON(e4b->bd_buddy_page == NULL); + +- new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS); +- new_entry->start_blk = block; +- new_entry->group = group; +- new_entry->count = count; +- new_entry->t_tid = handle->h_transaction->t_tid; + new_node = &new_entry->node; ++ block = new_entry->start_blk; + +- ext4_lock_group(sb, group); + if (!*n) { + /* first free block exent. 
We need to + protect buddy cache from being freed, +@@ -4788,7 +4814,6 @@ ext4_mb_free_metadata(handle_t *handle, + spin_lock(&sbi->s_md_lock); + list_add(&new_entry->list, &sbi->s_active_transaction); + spin_unlock(&sbi->s_md_lock); +- ext4_unlock_group(sb, group); + return 0; + } + +@@ -4895,15 +4920,6 @@ do_more: + BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); + } + #endif +- mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data, +- bit, count); +- +- /* We dirtied the bitmap block */ +- BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); +- err = ext4_journal_dirty_metadata(handle, bitmap_bh); +- if (err) +- goto error_return; +- + if (ac) { + ac->ac_b_ex.fe_group = block_group; + ac->ac_b_ex.fe_start = bit; +@@ -4915,11 +4931,29 @@ do_more: + if (err) + goto error_return; + if (metadata) { +- /* blocks being freed are metadata. these blocks shouldn't +- * be used until this transaction is committed */ +- ext4_mb_free_metadata(handle, &e4b, block_group, bit, count); ++ struct ext4_free_data *new_entry; ++ /* ++ * blocks being freed are metadata. these blocks shouldn't ++ * be used until this transaction is committed ++ */ ++ new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS); ++ new_entry->start_blk = bit; ++ new_entry->group = block_group; ++ new_entry->count = count; ++ new_entry->t_tid = handle->h_transaction->t_tid; ++ ext4_lock_group(sb, block_group); ++ mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data, ++ bit, count); ++ ext4_mb_free_metadata(handle, &e4b, new_entry); ++ ext4_unlock_group(sb, block_group); + } else { + ext4_lock_group(sb, block_group); ++ /* need to update group_info->bb_free and bitmap ++ * with group lock held. generate_buddy look at ++ * them with group lock_held ++ */ ++ mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data, ++ bit, count); + mb_free_blocks(inode, &e4b, bit, count); + ext4_mb_return_to_preallocation(inode, &e4b, block, count); + ext4_unlock_group(sb, block_group); +@@ -4942,6 +4976,10 @@ do_more: + + *freed += count; + ++ /* We dirtied the bitmap block */ ++ BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); ++ err = ext4_journal_dirty_metadata(handle, bitmap_bh); ++ + /* And the group descriptor block */ + BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); + ret = ext4_journal_dirty_metadata(handle, gd_bh); diff --git a/queue-2.6.27/ext4-fix-race-between-read_block_bitmap-and-mark_diskspace_used.patch b/queue-2.6.27/ext4-fix-race-between-read_block_bitmap-and-mark_diskspace_used.patch new file mode 100644 index 00000000000..630df7fe089 --- /dev/null +++ b/queue-2.6.27/ext4-fix-race-between-read_block_bitmap-and-mark_diskspace_used.patch @@ -0,0 +1,91 @@ +From tytso@mit.edu Wed Feb 18 11:10:35 2009 +From: Aneesh Kumar K.V +Date: Tue, 17 Feb 2009 10:58:33 -0500 +Subject: ext4: Fix race between read_block_bitmap() and mark_diskspace_used() +To: stable@kernel.org +Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o" , "Aneesh Kumar K.V" +Message-ID: <1234886324-15105-14-git-send-email-tytso@mit.edu> + + +From: Aneesh Kumar K.V + +(cherry picked from commit e8134b27e351e813414da3b95aa8eac6d3908088) + +We need to make sure we update the block bitmap and clear +EXT4_BG_BLOCK_UNINIT flag with sb_bgl_lock held, since +ext4_read_block_bitmap() looks at EXT4_BG_BLOCK_UNINIT to decide +whether to initialize the block bitmap each time it is called +(introduced by commit c806e68f), and this can race with block +allocations in ext4_mb_mark_diskspace_used(). 
+ +ext4_read_block_bitmap does: + +spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group)); +if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { + ext4_init_block_bitmap(sb, bh, block_group, desc); + +Now on the block allocation side we do + +mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data, + ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len); +.... +spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group)); +if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { + gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); + +ie on allocation we update the bitmap then we take the sb_bgl_lock +and clear the EXT4_BG_BLOCK_UNINIT flag. What can happen is a +parallel ext4_read_block_bitmap can zero out the bitmap in between +the above mb_set_bits and spin_lock(sb_bg_lock..) + +The race results in below user visible errors +EXT4-fs error (device sdb1): ext4_mb_release_inode_pa: free 100, pa_free 105 +EXT4-fs error (device sdb1): mb_free_blocks: double-free of inode 0's block .. + +Signed-off-by: Aneesh Kumar K.V +Signed-off-by: "Theodore Ts'o" +Signed-off-by: Greg Kroah-Hartman + +--- + fs/ext4/mballoc.c | 15 ++++++++++----- + 1 file changed, 10 insertions(+), 5 deletions(-) + +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -1070,7 +1070,10 @@ static void mb_clear_bits(spinlock_t *lo + cur += 32; + continue; + } +- mb_clear_bit_atomic(lock, cur, bm); ++ if (lock) ++ mb_clear_bit_atomic(lock, cur, bm); ++ else ++ mb_clear_bit(cur, bm); + cur++; + } + } +@@ -1088,7 +1091,10 @@ static void mb_set_bits(spinlock_t *lock + cur += 32; + continue; + } +- mb_set_bit_atomic(lock, cur, bm); ++ if (lock) ++ mb_set_bit_atomic(lock, cur, bm); ++ else ++ mb_set_bit(cur, bm); + cur++; + } + } +@@ -3143,10 +3149,9 @@ ext4_mb_mark_diskspace_used(struct ext4_ + } + } + #endif +- mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data, +- ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len); +- + spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group)); ++ mb_set_bits(NULL, bitmap_bh->b_data, ++ ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len); + if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { + gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); + gdp->bg_free_blocks_count = diff --git a/queue-2.6.27/ext4-fix-the-delalloc-writepages-to-allocate-blocks-at-the-right-offset.patch b/queue-2.6.27/ext4-fix-the-delalloc-writepages-to-allocate-blocks-at-the-right-offset.patch new file mode 100644 index 00000000000..06e9b9a1591 --- /dev/null +++ b/queue-2.6.27/ext4-fix-the-delalloc-writepages-to-allocate-blocks-at-the-right-offset.patch @@ -0,0 +1,99 @@ +From tytso@mit.edu Wed Feb 18 11:07:03 2009 +From: Aneesh Kumar K.V +Date: Tue, 17 Feb 2009 10:58:23 -0500 +Subject: ext4: Fix the delalloc writepages to allocate blocks at the right offset. +To: stable@kernel.org +Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o" , "Aneesh Kumar K.V" +Message-ID: <1234886324-15105-4-git-send-email-tytso@mit.edu> + + +From: Aneesh Kumar K.V + +(cherry picked from commit 791b7f08954869d7b8ff438f3dac3cfb39778297) + +When iterating through the pages which have mapped buffer_heads, we +failed to update the b_state value. This results in allocating blocks +at logical offset 0. 
+ +Signed-off-by: Aneesh Kumar K.V +Signed-off-by: "Theodore Ts'o" +Signed-off-by: Greg Kroah-Hartman + +--- + fs/ext4/inode.c | 39 ++++++++++++++++++++++++++++++++++----- + 1 file changed, 34 insertions(+), 5 deletions(-) + +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -1648,18 +1648,25 @@ struct mpage_da_data { + */ + static int mpage_da_submit_io(struct mpage_da_data *mpd) + { +- struct address_space *mapping = mpd->inode->i_mapping; +- int ret = 0, err, nr_pages, i; +- unsigned long index, end; ++ long pages_skipped; + struct pagevec pvec; ++ unsigned long index, end; ++ int ret = 0, err, nr_pages, i; ++ struct inode *inode = mpd->inode; ++ struct address_space *mapping = inode->i_mapping; + + BUG_ON(mpd->next_page <= mpd->first_page); +- pagevec_init(&pvec, 0); ++ /* ++ * We need to start from the first_page to the next_page - 1 ++ * to make sure we also write the mapped dirty buffer_heads. ++ * If we look at mpd->lbh.b_blocknr we would only be looking ++ * at the currently mapped buffer_heads. ++ */ + index = mpd->first_page; + end = mpd->next_page - 1; + ++ pagevec_init(&pvec, 0); + while (index <= end) { +- /* XXX: optimize tail */ + nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); + if (nr_pages == 0) + break; +@@ -1671,6 +1678,10 @@ static int mpage_da_submit_io(struct mpa + break; + index++; + ++ BUG_ON(!PageLocked(page)); ++ BUG_ON(PageWriteback(page)); ++ ++ pages_skipped = mpd->wbc->pages_skipped; + err = mapping->a_ops->writepage(page, mpd->wbc); + if (!err) + mpd->pages_written++; +@@ -1991,11 +2002,29 @@ static int __mpage_da_writepage(struct p + bh = head; + do { + BUG_ON(buffer_locked(bh)); ++ /* ++ * We need to try to allocate ++ * unmapped blocks in the same page. ++ * Otherwise we won't make progress ++ * with the page in ext4_da_writepage ++ */ + if (buffer_dirty(bh) && + (!buffer_mapped(bh) || buffer_delay(bh))) { + mpage_add_bh_to_extent(mpd, logical, bh); + if (mpd->io_done) + return MPAGE_DA_EXTENT_TAIL; ++ } else if (buffer_dirty(bh) && (buffer_mapped(bh))) { ++ /* ++ * mapped dirty buffer. We need to update ++ * the b_state because we look at ++ * b_state in mpage_da_map_blocks. We don't ++ * update b_size because if we find an ++ * unmapped buffer_head later we need to ++ * use the b_state flag of that buffer_head. ++ */ ++ if (mpd->lbh.b_size == 0) ++ mpd->lbh.b_state = ++ bh->b_state & BH_FLAGS; + } + logical++; + } while ((bh = bh->b_this_page) != head); diff --git a/queue-2.6.27/ext4-fix-the-race-between-read_inode_bitmap-and-ext4_new_inode.patch b/queue-2.6.27/ext4-fix-the-race-between-read_inode_bitmap-and-ext4_new_inode.patch new file mode 100644 index 00000000000..2869fd51027 --- /dev/null +++ b/queue-2.6.27/ext4-fix-the-race-between-read_inode_bitmap-and-ext4_new_inode.patch @@ -0,0 +1,234 @@ +From tytso@mit.edu Wed Feb 18 11:10:57 2009 +From: Aneesh Kumar K.V +Date: Tue, 17 Feb 2009 10:58:34 -0500 +Subject: ext4: Fix the race between read_inode_bitmap() and ext4_new_inode() +To: stable@kernel.org +Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o" , "Aneesh Kumar K.V" +Message-ID: <1234886324-15105-15-git-send-email-tytso@mit.edu> + + +From: Aneesh Kumar K.V + +(cherry picked from commit 393418676a7602e1d7d3f6e560159c65c8cbd50e) + +We need to make sure we update the inode bitmap and clear +EXT4_BG_INODE_UNINIT flag with sb_bgl_lock held, since +ext4_read_inode_bitmap() looks at EXT4_BG_INODE_UNINIT to decide +whether to initialize the inode bitmap each time it is called. +(introduced by commit c806e68f.) 
+ +ext4_read_inode_bitmap does: + +spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group)); +if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { + ext4_init_inode_bitmap(sb, bh, block_group, desc); + +and ext4_new_inode does +if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group), + ino, inode_bitmap_bh->b_data)) + ...... + ... +spin_lock(sb_bgl_lock(sbi, group)); + +gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT); +i.e., on allocation we update the bitmap then we take the sb_bgl_lock +and clear the EXT4_BG_INODE_UNINIT flag. What can happen is a +parallel ext4_read_inode_bitmap can zero out the bitmap in between +the above ext4_set_bit_atomic and spin_lock(sb_bg_lock..) + +The race results in below user visible errors +EXT4-fs error (device sdb1): ext4_free_inode: bit already cleared for inode 168449 +EXT4-fs warning (device sdb1): ext4_unlink: Deleting nonexistent file ... +EXT4-fs warning (device sdb1): ext4_rmdir: empty directory has too many links ... +ls: /mnt/tmp/f/p369/d3/d6/d39/db2/dee/d10f/d3f/l71: Stale NFS file handle + +Signed-off-by: Aneesh Kumar K.V +Signed-off-by: "Theodore Ts'o" +Signed-off-by: Greg Kroah-Hartman + +--- + fs/ext4/ialloc.c | 140 ++++++++++++++++++++++++++++++++----------------------- + 1 file changed, 83 insertions(+), 57 deletions(-) + +--- a/fs/ext4/ialloc.c ++++ b/fs/ext4/ialloc.c +@@ -567,6 +567,77 @@ static int find_group_other(struct super + } + + /* ++ * claim the inode from the inode bitmap. If the group ++ * is uninit we need to take the groups's sb_bgl_lock ++ * and clear the uninit flag. The inode bitmap update ++ * and group desc uninit flag clear should be done ++ * after holding sb_bgl_lock so that ext4_read_inode_bitmap ++ * doesn't race with the ext4_claim_inode ++ */ ++static int ext4_claim_inode(struct super_block *sb, ++ struct buffer_head *inode_bitmap_bh, ++ unsigned long ino, ext4_group_t group, int mode) ++{ ++ int free = 0, retval = 0; ++ struct ext4_sb_info *sbi = EXT4_SB(sb); ++ struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL); ++ ++ spin_lock(sb_bgl_lock(sbi, group)); ++ if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) { ++ /* not a free inode */ ++ retval = 1; ++ goto err_ret; ++ } ++ ino++; ++ if ((group == 0 && ino < EXT4_FIRST_INO(sb)) || ++ ino > EXT4_INODES_PER_GROUP(sb)) { ++ spin_unlock(sb_bgl_lock(sbi, group)); ++ ext4_error(sb, __func__, ++ "reserved inode or inode > inodes count - " ++ "block_group = %lu, inode=%lu", group, ++ ino + group * EXT4_INODES_PER_GROUP(sb)); ++ return 1; ++ } ++ /* If we didn't allocate from within the initialized part of the inode ++ * table then we need to initialize up to this inode. */ ++ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) { ++ ++ if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { ++ gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT); ++ /* When marking the block group with ++ * ~EXT4_BG_INODE_UNINIT we don't want to depend ++ * on the value of bg_itable_unused even though ++ * mke2fs could have initialized the same for us. ++ * Instead we calculated the value below ++ */ ++ ++ free = 0; ++ } else { ++ free = EXT4_INODES_PER_GROUP(sb) - ++ le16_to_cpu(gdp->bg_itable_unused); ++ } ++ ++ /* ++ * Check the relative inode number against the last used ++ * relative inode number in this group. 
if it is greater
++	 * we need to update the bg_itable_unused count
++	 *
++	 */
++	if (ino > free)
++		gdp->bg_itable_unused =
++			cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
++	}
++	le16_add_cpu(&gdp->bg_free_inodes_count, -1);
++	if (S_ISDIR(mode)) {
++		le16_add_cpu(&gdp->bg_used_dirs_count, 1);
++	}
++	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
++err_ret:
++	spin_unlock(sb_bgl_lock(sbi, group));
++	return retval;
++}
++
++/*
+  * There are two policies for allocating an inode. If the new inode is
+  * a directory, then a forward search is made for a block group with both
+  * free space and a low directory-to-inode ratio; if that fails, then of
+@@ -649,8 +720,12 @@ repeat_in_this_group:
+ 			if (err)
+ 				goto fail;
+ 
+-			if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
+-					ino, bitmap_bh->b_data)) {
++			BUFFER_TRACE(bh2, "get_write_access");
++			err = ext4_journal_get_write_access(handle, bh2);
++			if (err)
++				goto fail;
++			if (!ext4_claim_inode(sb, bitmap_bh,
++						ino, group, mode)) {
+ 				/* we won it */
+ 				BUFFER_TRACE(bitmap_bh,
+ 					"call ext4_journal_dirty_metadata");
+@@ -658,10 +733,13 @@ repeat_in_this_group:
+ 						bitmap_bh);
+ 				if (err)
+ 					goto fail;
++				/* zero bit is inode number 1*/
++				ino++;
+ 				goto got;
+ 			}
+ 			/* we lost it */
+ 			jbd2_journal_release_buffer(handle, bitmap_bh);
++			jbd2_journal_release_buffer(handle, bh2);
+ 
+ 			if (++ino < EXT4_INODES_PER_GROUP(sb))
+ 				goto repeat_in_this_group;
+@@ -681,21 +759,6 @@ repeat_in_this_group:
+ 	goto out;
+ 
+ got:
+-	ino++;
+-	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
+-	    ino > EXT4_INODES_PER_GROUP(sb)) {
+-		ext4_error(sb, __func__,
+-			   "reserved inode or inode > inodes count - "
+-			   "block_group = %lu, inode=%lu", group,
+-			   ino + group * EXT4_INODES_PER_GROUP(sb));
+-		err = -EIO;
+-		goto fail;
+-	}
+-
+-	BUFFER_TRACE(bh2, "get_write_access");
+-	err = ext4_journal_get_write_access(handle, bh2);
+-	if (err) goto fail;
+-
+ 	/* We may have to initialize the block bitmap if it isn't already */
+ 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
+ 	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+@@ -730,47 +793,10 @@ got:
+ 		if (err)
+ 			goto fail;
+ 	}
+-
+-	spin_lock(sb_bgl_lock(sbi, group));
+-	/* If we didn't allocate from within the initialized part of the inode
+-	 * table then we need to initialize up to this inode. */
+-	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
+-		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+-			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
+-
+-			/* When marking the block group with
+-			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
+-			 * on the value of bg_itable_unused even though
+-			 * mke2fs could have initialized the same for us.
+-			 * Instead we calculated the value below
+-			 */
+-
+-			free = 0;
+-		} else {
+-			free = EXT4_INODES_PER_GROUP(sb) -
+-				le16_to_cpu(gdp->bg_itable_unused);
+-		}
+-
+-		/*
+-		 * Check the relative inode number against the last used
+-		 * relative inode number in this group. if it is greater
+-		 * we need to update the bg_itable_unused count
+-		 *
+-		 */
+-		if (ino > free)
+-			gdp->bg_itable_unused =
+-				cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
+-	}
+-
+-	le16_add_cpu(&gdp->bg_free_inodes_count, -1);
+-	if (S_ISDIR(mode)) {
+-		le16_add_cpu(&gdp->bg_used_dirs_count, 1);
+-	}
+-	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+-	spin_unlock(sb_bgl_lock(sbi, group));
+-	BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
++	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
+ 	err = ext4_journal_dirty_metadata(handle, bh2);
+-	if (err) goto fail;
++	if (err)
++		goto fail;
+ 
+ 	percpu_counter_dec(&sbi->s_freeinodes_counter);
+ 	if (S_ISDIR(mode))
diff --git a/queue-2.6.27/ext4-init-the-complete-page-while-building-buddy-cache.patch b/queue-2.6.27/ext4-init-the-complete-page-while-building-buddy-cache.patch
new file mode 100644
index 00000000000..73a58e8634b
--- /dev/null
+++ b/queue-2.6.27/ext4-init-the-complete-page-while-building-buddy-cache.patch
@@ -0,0 +1,48 @@
+From tytso@mit.edu Wed Feb 18 11:12:58 2009
+From: Aneesh Kumar K.V
+Date: Tue, 17 Feb 2009 10:58:39 -0500
+Subject: ext4: Init the complete page while building buddy cache
+To: stable@kernel.org
+Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o" , "Aneesh Kumar K.V"
+Message-ID: <1234886324-15105-20-git-send-email-tytso@mit.edu>
+
+
+From: Aneesh Kumar K.V
+
+(cherry picked from commit 29eaf024980e07cc01f31ae4ea5d68c917f4b7da)
+
+We need to init the complete page during buddy cache init
+by setting the contents to '1'. Otherwise we can see the
+following errors after doing an online resize of the
+filesystem:
+
+EXT4-fs error (device sdb1): ext4_mb_mark_diskspace_used:
+	Allocating block 1040385 in system zone of 127 group
+
+Signed-off-by: Aneesh Kumar K.V
+Signed-off-by: "Theodore Ts'o"
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/ext4/mballoc.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -848,6 +848,8 @@ static int ext4_mb_init_cache(struct pag
+ 
+ 	err = 0;
+ 	first_block = page->index * blocks_per_page;
++	/* init the page */
++	memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
+ 	for (i = 0; i < blocks_per_page; i++) {
+ 		int group;
+ 		struct ext4_group_info *grinfo;
+@@ -874,7 +876,6 @@ static int ext4_mb_init_cache(struct pag
+ 		BUG_ON(incore == NULL);
+ 		mb_debug("put buddy for group %u in page %lu/%x\n",
+ 			group, page->index, i * blocksize);
+-		memset(data, 0xff, blocksize);
+ 		grinfo = ext4_get_group_info(sb, group);
+ 		grinfo->bb_fragments = 0;
+ 		memset(grinfo->bb_counters, 0,
diff --git a/queue-2.6.27/ext4-initialize-the-new-group-descriptor-when-resizing-the-filesystem.patch b/queue-2.6.27/ext4-initialize-the-new-group-descriptor-when-resizing-the-filesystem.patch
new file mode 100644
index 00000000000..8c9c8797eec
--- /dev/null
+++ b/queue-2.6.27/ext4-initialize-the-new-group-descriptor-when-resizing-the-filesystem.patch
@@ -0,0 +1,42 @@
+From tytso@mit.edu Wed Feb 18 11:14:37 2009
+From: "Theodore Ts'o"
+Date: Tue, 17 Feb 2009 10:58:44 -0500
+Subject: ext4: Initialize the new group descriptor when resizing the filesystem
+To: stable@kernel.org
+Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o"
+Message-ID: <1234886324-15105-25-git-send-email-tytso@mit.edu>
+
+From: "Theodore Ts'o"
+
+(cherry picked from commit fdff73f094e7220602cc3f8959c7230517976412)
+
+Make sure all of the fields of the group descriptor are properly
+initialized. Previously, we allowed the bg_flags field to contain
+random garbage, which could trigger non-deterministic behavior,
+including a kernel OOPS.
+
+http://bugzilla.kernel.org/show_bug.cgi?id=12433
+
+Signed-off-by: "Theodore Ts'o"
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/ext4/resize.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -860,11 +860,13 @@ int ext4_group_add(struct super_block *s
+ 	gdp = (struct ext4_group_desc *)((char *)primary->b_data +
+ 					 gdb_off * EXT4_DESC_SIZE(sb));
+ 
++	memset(gdp, 0, EXT4_DESC_SIZE(sb));
+ 	ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */
+ 	ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */
+ 	ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */
+ 	gdp->bg_free_blocks_count = cpu_to_le16(input->free_blocks_count);
+ 	gdp->bg_free_inodes_count = cpu_to_le16(EXT4_INODES_PER_GROUP(sb));
++	gdp->bg_flags = cpu_to_le16(EXT4_BG_INODE_ZEROED);
+ 	gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp);
+ 
+ 	/*
diff --git a/queue-2.6.27/ext4-mark-the-blocks-inode-bitmap-beyond-end-of-group-as-used.patch b/queue-2.6.27/ext4-mark-the-blocks-inode-bitmap-beyond-end-of-group-as-used.patch
new file mode 100644
index 00000000000..e3eb247c789
--- /dev/null
+++ b/queue-2.6.27/ext4-mark-the-blocks-inode-bitmap-beyond-end-of-group-as-used.patch
@@ -0,0 +1,74 @@
+From tytso@mit.edu Wed Feb 18 11:12:13 2009
+From: Aneesh Kumar K.V
+Date: Tue, 17 Feb 2009 10:58:37 -0500
+Subject: ext4: mark the blocks/inode bitmap beyond end of group as used
+To: stable@kernel.org
+Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o" , "Aneesh Kumar K.V"
+Message-ID: <1234886324-15105-18-git-send-email-tytso@mit.edu>
+
+
+From: Aneesh Kumar K.V
+
+(cherry picked from commit 648f5879f5892dddd3ba71cd0d285599f40f2512)
+
+We need to mark the block/inode bitmap beyond the end of the group
+with '1'.
+
+Signed-off-by: Aneesh Kumar K.V
+Signed-off-by: "Theodore Ts'o"
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/ext4/ialloc.c  |    2 +-
+ fs/ext4/mballoc.c |    4 ++--
+ fs/ext4/resize.c  |    6 ++----
+ 3 files changed, 5 insertions(+), 7 deletions(-)
+
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -84,7 +84,7 @@ unsigned ext4_init_inode_bitmap(struct s
+ 	}
+ 
+ 	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
+-	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
++	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
+ 			bh->b_data);
+ 
+ 	return EXT4_INODES_PER_GROUP(sb);
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -3148,8 +3148,8 @@ ext4_mb_mark_diskspace_used(struct ext4_
+ 	    in_range(block + len - 1, ext4_inode_table(sb, gdp),
+ 		     EXT4_SB(sb)->s_itb_per_group)) {
+ 		ext4_error(sb, __func__,
+-			   "Allocating block in system zone - block = %llu",
+-			   block);
++			   "Allocating block %llu in system zone of %d group\n",
++			   block, ac->ac_b_ex.fe_group);
+ 		/* File system mounted not to panic on error
+ 		 * Fix the bitmap and repeat the block allocation
+ 		 * We leak some of the blocks here.
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -284,11 +284,9 @@ static int setup_new_group_blocks(struct
+ 	if ((err = extend_or_restart_transaction(handle, 2, bh)))
+ 		goto exit_bh;
+ 
+-	mark_bitmap_end(input->blocks_count, EXT4_BLOCKS_PER_GROUP(sb),
+-			bh->b_data);
++	mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8, bh->b_data);
+ 	ext4_journal_dirty_metadata(handle, bh);
+ 	brelse(bh);
+-
+ 	/* Mark unused entries in inode bitmap used */
+ 	ext4_debug("clear inode bitmap %#04llx (+%llu)\n",
+ 		   input->inode_bitmap, input->inode_bitmap - start);
+@@ -297,7 +295,7 @@ static int setup_new_group_blocks(struct
+ 		goto exit_journal;
+ 	}
+ 
+-	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
++	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
+ 			bh->b_data);
+ 	ext4_journal_dirty_metadata(handle, bh);
+ exit_bh:
diff --git a/queue-2.6.27/ext4-only-use-i_size_high-for-regular-files.patch b/queue-2.6.27/ext4-only-use-i_size_high-for-regular-files.patch
new file mode 100644
index 00000000000..3a0ea4fb682
--- /dev/null
+++ b/queue-2.6.27/ext4-only-use-i_size_high-for-regular-files.patch
@@ -0,0 +1,66 @@
+From tytso@mit.edu Wed Feb 18 11:13:37 2009
+From: "Theodore Ts'o"
+Date: Tue, 17 Feb 2009 10:58:41 -0500
+Subject: ext4: only use i_size_high for regular files
+To: stable@kernel.org
+Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o"
+Message-ID: <1234886324-15105-22-git-send-email-tytso@mit.edu>
+
+From: "Theodore Ts'o"
+
+(cherry picked from commit 06a279d636734da32bb62dd2f7b0ade666f65d7c)
+
+Directories are not allowed to be bigger than 2GB, so don't use
+i_size_high for anything other than regular files. E2fsck should
+complain about these inodes, but the simplest thing to do for the
+kernel is to only use i_size_high for regular files.
+
+This prevents an intentionally corrupted filesystem from causing the
+kernel to burn a huge amount of CPU and issuing error messages such
+as:
+
+EXT4-fs warning (device loop0): ext4_block_to_path: block 135090028 > max
+
+Thanks to David Maciejak from Fortinet's FortiGuard Global Security
+Research Team for reporting this issue.
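For scale, a minimal user-space sketch of the failure mode (illustrative
only, not part of the patch; the on-disk field values are invented): a few
stale bits in i_size_high make a one-block directory look enormous, which
is why ext4_block_to_path() is then asked for logical blocks far past max.

	#include <stdio.h>

	int main(void)
	{
		unsigned int i_size_lo = 4096;   /* one 4KiB block */
		unsigned int i_size_high = 0x1f; /* hypothetical garbage */

		/* without the fix, directories combine both halves too */
		unsigned long long isize =
			((unsigned long long)i_size_high << 32) | i_size_lo;

		printf("apparent size %llu bytes, last logical block %llu\n",
		       isize, isize / 4096);
		return 0;
	}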
+
+http://bugzilla.kernel.org/show_bug.cgi?id=12375
+
+Signed-off-by: "Theodore Ts'o"
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/ext4/ext4.h  |    7 +++++--
+ fs/ext4/inode.c |    4 ++--
+ 2 files changed, 7 insertions(+), 4 deletions(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1174,8 +1174,11 @@ static inline void ext4_r_blocks_count_s
+ 
+ static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
+ {
+-	return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
+-		le32_to_cpu(raw_inode->i_size_lo);
++	if (S_ISREG(le16_to_cpu(raw_inode->i_mode)))
++		return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
++			le32_to_cpu(raw_inode->i_size_lo);
++	else
++		return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
+ }
+ 
+ static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -351,9 +351,9 @@ static int ext4_block_to_path(struct ino
+ 		final = ptrs;
+ 	} else {
+ 		ext4_warning(inode->i_sb, "ext4_block_to_path",
+-				"block %lu > max",
++				"block %lu > max in inode %lu",
+ 				i_block + direct_blocks +
+-				indirect_blocks + double_blocks);
++				indirect_blocks + double_blocks, inode->i_ino);
+ 	}
+ 	if (boundary)
+ 		*boundary = final - 1 - (i_block & (ptrs - 1));
diff --git a/queue-2.6.27/ext4-tone-down-ext4_da_writepages-warnings.patch b/queue-2.6.27/ext4-tone-down-ext4_da_writepages-warnings.patch
new file mode 100644
index 00000000000..f88dd36ff8f
--- /dev/null
+++ b/queue-2.6.27/ext4-tone-down-ext4_da_writepages-warnings.patch
@@ -0,0 +1,58 @@
+From tytso@mit.edu Wed Feb 18 11:06:47 2009
+From: "Theodore Ts'o"
+Date: Tue, 17 Feb 2009 10:58:22 -0500
+Subject: ext4: tone down ext4_da_writepages warnings
+To: stable@kernel.org
+Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o"
+Message-ID: <1234886324-15105-3-git-send-email-tytso@mit.edu>
+
+From: "Theodore Ts'o"
+
+(cherry picked from commit 2a21e37e48b94388f2cc8c0392f104f5443d4bb8)
+
+If the filesystem has errors, ext4_da_writepages() will return a *lot*
+of errors, including lots and lots of stack dumps. While it's true
+that we are dropping user data on the floor, which is unfortunate, the
+stack dumps aren't helpful, and they tend to obscure the true original
+root cause of the problem. So in the case where the filesystem has
+aborted, return an EROFS right away.
+
+Signed-off-by: "Theodore Ts'o"
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/ext4/inode.c |   16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2298,6 +2298,20 @@ static int ext4_da_writepages(struct add
+ 	 */
+ 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+ 		return 0;
++
++	/*
++	 * If the filesystem has aborted, it is read-only, so return
++	 * right away instead of dumping stack traces later on that
++	 * will obscure the real source of the problem. We test
++	 * EXT4_MOUNT_ABORT instead of sb->s_flag's MS_RDONLY because
++	 * the latter could be true if the filesystem is mounted
++	 * read-only, and in that case, ext4_da_writepages should
++	 * *never* be called, so if that ever happens, we would want
++	 * the stack trace.
++	 */
++	if (unlikely(sbi->s_mount_opt & EXT4_MOUNT_ABORT))
++		return -EROFS;
++
+ 	/*
+ 	 * Make sure nr_to_write is >= sbi->s_mb_stream_request
+ 	 * This make sure small files blocks are allocated in
+@@ -2336,7 +2350,7 @@ restart_loop:
+ 	handle = ext4_journal_start(inode, needed_blocks);
+ 	if (IS_ERR(handle)) {
+ 		ret = PTR_ERR(handle);
+-		printk(KERN_EMERG "%s: jbd2_start: "
++		printk(KERN_CRIT "%s: jbd2_start: "
+ 		       "%ld pages, ino %lu; err %d\n", __func__,
+ 		       wbc->nr_to_write, inode->i_ino, ret);
+ 		dump_stack();
diff --git a/queue-2.6.27/ext4-use-an-rbtree-for-tracking-blocks-freed-during-transaction.patch b/queue-2.6.27/ext4-use-an-rbtree-for-tracking-blocks-freed-during-transaction.patch
new file mode 100644
index 00000000000..173d38af4b7
--- /dev/null
+++ b/queue-2.6.27/ext4-use-an-rbtree-for-tracking-blocks-freed-during-transaction.patch
@@ -0,0 +1,347 @@
+From tytso@mit.edu Wed Feb 18 11:09:56 2009
+From: Aneesh Kumar K.V
+Date: Tue, 17 Feb 2009 10:58:31 -0500
+Subject: ext4: Use an rbtree for tracking blocks freed during transaction.
+To: stable@kernel.org
+Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o" , "Aneesh Kumar K.V"
+Message-ID: <1234886324-15105-12-git-send-email-tytso@mit.edu>
+
+
+From: Aneesh Kumar K.V
+
+(cherry picked from commit c894058d66637c7720569fbe12957f4de64d9991 to allow
+commit e21675d4 to be included in 2.6.27.y)
+
+With this patch we track the blocks freed during a transaction using a
+red-black tree. We also make sure contiguous blocks freed are collected
+in one node in the tree.
+
+Signed-off-by: Aneesh Kumar K.V
+Signed-off-by: Theodore Ts'o
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/ext4/mballoc.c |  186 ++++++++++++++++++++++++++++++++++--------------------
+ fs/ext4/mballoc.h |   25 ++++---
+ 2 files changed, 134 insertions(+), 77 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -332,6 +332,7 @@
+  */
+ static struct kmem_cache *ext4_pspace_cachep;
+ static struct kmem_cache *ext4_ac_cachep;
++static struct kmem_cache *ext4_free_ext_cachep;
+ static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+ 					ext4_group_t group);
+ static int ext4_mb_init_per_dev_proc(struct super_block *sb);
+@@ -2506,6 +2507,7 @@ int ext4_mb_add_groupinfo(struct super_b
+ 
+ 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
+ 	init_rwsem(&meta_group_info[i]->alloc_sem);
++	meta_group_info[i]->bb_free_root.rb_node = NULL;;
+ 
+ #ifdef DOUBLE_CHECK
+ 	{
+@@ -2819,13 +2821,11 @@ int ext4_mb_release(struct super_block *
+ static noinline_for_stack void
+ ext4_mb_free_committed_blocks(struct super_block *sb)
+ {
+-	struct ext4_sb_info *sbi = EXT4_SB(sb);
+-	int err;
+-	int i;
+-	int count = 0;
+-	int count2 = 0;
+-	struct ext4_free_metadata *md;
+ 	struct ext4_buddy e4b;
++	struct ext4_group_info *db;
++	struct ext4_sb_info *sbi = EXT4_SB(sb);
++	int err, count = 0, count2 = 0;
++	struct ext4_free_data *entry;
+ 
+ 	if (list_empty(&sbi->s_committed_transaction))
+ 		return;
+@@ -2833,44 +2833,46 @@ ext4_mb_free_committed_blocks(struct sup
+ 	/* there is committed blocks to be freed yet */
+ 	do {
+ 		/* get next array of blocks */
+-		md = NULL;
++		entry = NULL;
+ 		spin_lock(&sbi->s_md_lock);
+ 		if (!list_empty(&sbi->s_committed_transaction)) {
+-			md = list_entry(sbi->s_committed_transaction.next,
+-					struct ext4_free_metadata, list);
+-			list_del(&md->list);
++			entry = list_entry(sbi->s_committed_transaction.next,
++					struct ext4_free_data, list);
++			list_del(&entry->list);
+ 		}
+ 		spin_unlock(&sbi->s_md_lock);
+ 
+-		if (md == NULL)
++		if (entry == NULL)
+ 			break;
+ 
+ 		mb_debug("gonna free %u blocks in group %lu (0x%p):",
+-				md->num, md->group, md);
++				entry->count, entry->group, entry);
+ 
+-		err = ext4_mb_load_buddy(sb, md->group, &e4b);
++		err = ext4_mb_load_buddy(sb, entry->group, &e4b);
+ 		/* we expect to find existing buddy because it's pinned */
+ 		BUG_ON(err != 0);
+ 
++		db = e4b.bd_info;
+ 		/* there are blocks to put in buddy to make them really free */
+-		count += md->num;
++		count += entry->count;
+ 		count2++;
+-		ext4_lock_group(sb, md->group);
+-		for (i = 0; i < md->num; i++) {
+-			mb_debug(" %u", md->blocks[i]);
+-			mb_free_blocks(NULL, &e4b, md->blocks[i], 1);
+-		}
+-		mb_debug("\n");
+-		ext4_unlock_group(sb, md->group);
+-
+-		/* balance refcounts from ext4_mb_free_metadata() */
+-		page_cache_release(e4b.bd_buddy_page);
+-		page_cache_release(e4b.bd_bitmap_page);
++		ext4_lock_group(sb, entry->group);
++		/* Take it out of per group rb tree */
++		rb_erase(&entry->node, &(db->bb_free_root));
++		mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
++
++		if (!db->bb_free_root.rb_node) {
++			/* No more items in the per group rb tree
++			 * balance refcounts from ext4_mb_free_metadata()
++			 */
++			page_cache_release(e4b.bd_buddy_page);
++			page_cache_release(e4b.bd_bitmap_page);
++		}
++		ext4_unlock_group(sb, entry->group);
+ 
+-		kfree(md);
++		kmem_cache_free(ext4_free_ext_cachep, entry);
+ 		ext4_mb_release_desc(&e4b);
+-
+-	} while (md);
++	} while (1);
+ 
+ 	mb_debug("freed %u blocks in %u structures\n", count, count2);
+ }
+@@ -3025,6 +3027,16 @@ int __init init_ext4_mballoc(void)
+ 		kmem_cache_destroy(ext4_pspace_cachep);
+ 		return -ENOMEM;
+ 	}
++
++	ext4_free_ext_cachep =
++		kmem_cache_create("ext4_free_block_extents",
++				sizeof(struct ext4_free_data),
++				0, SLAB_RECLAIM_ACCOUNT, NULL);
++	if (ext4_free_ext_cachep == NULL) {
++		kmem_cache_destroy(ext4_pspace_cachep);
++		kmem_cache_destroy(ext4_ac_cachep);
++		return -ENOMEM;
++	}
+ #ifdef CONFIG_PROC_FS
+ 	proc_root_ext4 = proc_mkdir("fs/ext4", NULL);
+ 	if (proc_root_ext4 == NULL)
+@@ -3041,6 +3053,7 @@ void exit_ext4_mballoc(void)
+ #ifdef CONFIG_PROC_FS
+ 	remove_proc_entry("fs/ext4", NULL);
+ #endif
++	kmem_cache_destroy(ext4_free_ext_cachep);
+ }
+ 
+ 
+@@ -3561,6 +3574,7 @@ ext4_mb_use_preallocated(struct ext4_all
+ 		ac->ac_criteria = 20;
+ 		return 1;
+ 	}
++
+ 	return 0;
+ }
+ 
+@@ -4678,6 +4692,21 @@ static void ext4_mb_poll_new_transaction
+ 	ext4_mb_free_committed_blocks(sb);
+ }
+ 
++/*
++ * We can merge two free data extents only if the physical blocks
++ * are contiguous, AND the extents were freed by the same transaction,
++ * AND the blocks are associated with the same group.
++ */
++static int can_merge(struct ext4_free_data *entry1,
++			struct ext4_free_data *entry2)
++{
++	if ((entry1->t_tid == entry2->t_tid) &&
++	    (entry1->group == entry2->group) &&
++	    ((entry1->start_blk + entry1->count) == entry2->start_blk))
++		return 1;
++	return 0;
++}
++
+ static noinline_for_stack int
+ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
+ 			ext4_group_t group, ext4_grpblk_t block, int count)
+@@ -4685,57 +4714,80 @@ ext4_mb_free_metadata(handle_t *handle,
+ 	struct ext4_group_info *db = e4b->bd_info;
+ 	struct super_block *sb = e4b->bd_sb;
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+-	struct ext4_free_metadata *md;
+-	int i;
++	struct ext4_free_data *entry, *new_entry;
++	struct rb_node **n = &db->bb_free_root.rb_node, *node;
++	struct rb_node *parent = NULL, *new_node;
++
+ 
+ 	BUG_ON(e4b->bd_bitmap_page == NULL);
+ 	BUG_ON(e4b->bd_buddy_page == NULL);
+ 
++	new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
++	new_entry->start_blk = block;
++	new_entry->group = group;
++	new_entry->count = count;
++	new_entry->t_tid = handle->h_transaction->t_tid;
++	new_node = &new_entry->node;
++
+ 	ext4_lock_group(sb, group);
+-	for (i = 0; i < count; i++) {
+-		md = db->bb_md_cur;
+-		if (md && db->bb_tid != handle->h_transaction->t_tid) {
+-			db->bb_md_cur = NULL;
+-			md = NULL;
++	if (!*n) {
++		/* first free block exent. We need to
++		   protect buddy cache from being freed,
++		 * otherwise we'll refresh it from
++		 * on-disk bitmap and lose not-yet-available
++		 * blocks */
++		page_cache_get(e4b->bd_buddy_page);
++		page_cache_get(e4b->bd_bitmap_page);
++	}
++	while (*n) {
++		parent = *n;
++		entry = rb_entry(parent, struct ext4_free_data, node);
++		if (block < entry->start_blk)
++			n = &(*n)->rb_left;
++		else if (block >= (entry->start_blk + entry->count))
++			n = &(*n)->rb_right;
++		else {
++			ext4_error(sb, __func__,
++				"Double free of blocks %d (%d %d)\n",
++				block, entry->start_blk, entry->count);
++			return 0;
+ 		}
++	}
+ 
+-		if (md == NULL) {
+-			ext4_unlock_group(sb, group);
+-			md = kmalloc(sizeof(*md), GFP_NOFS);
+-			if (md == NULL)
+-				return -ENOMEM;
+-			md->num = 0;
+-			md->group = group;
++	rb_link_node(new_node, parent, n);
++	rb_insert_color(new_node, &db->bb_free_root);
+ 
+-			ext4_lock_group(sb, group);
+-			if (db->bb_md_cur == NULL) {
+-				spin_lock(&sbi->s_md_lock);
+-				list_add(&md->list, &sbi->s_active_transaction);
+-				spin_unlock(&sbi->s_md_lock);
+-				/* protect buddy cache from being freed,
+-				 * otherwise we'll refresh it from
+-				 * on-disk bitmap and lose not-yet-available
+-				 * blocks */
+-				page_cache_get(e4b->bd_buddy_page);
+-				page_cache_get(e4b->bd_bitmap_page);
+-				db->bb_md_cur = md;
+-				db->bb_tid = handle->h_transaction->t_tid;
+-				mb_debug("new md 0x%p for group %lu\n",
+-						md, md->group);
+-			} else {
+-				kfree(md);
+-				md = db->bb_md_cur;
+-			}
++	/* Now try to see the extent can be merged to left and right */
++	node = rb_prev(new_node);
++	if (node) {
++		entry = rb_entry(node, struct ext4_free_data, node);
++		if (can_merge(entry, new_entry)) {
++			new_entry->start_blk = entry->start_blk;
++			new_entry->count += entry->count;
++			rb_erase(node, &(db->bb_free_root));
++			spin_lock(&sbi->s_md_lock);
++			list_del(&entry->list);
++			spin_unlock(&sbi->s_md_lock);
++			kmem_cache_free(ext4_free_ext_cachep, entry);
+ 		}
++	}
+ 
+-		BUG_ON(md->num >= EXT4_BB_MAX_BLOCKS);
+-		md->blocks[md->num] = block + i;
+-		md->num++;
+-		if (md->num == EXT4_BB_MAX_BLOCKS) {
+-			/* no more space, put full container on a sb's list */
+-			db->bb_md_cur = NULL;
++	node = rb_next(new_node);
++	if (node) {
++		entry = rb_entry(node, struct ext4_free_data, node);
++		if (can_merge(new_entry, entry)) {
++			new_entry->count += entry->count;
++			rb_erase(node, &(db->bb_free_root));
++			spin_lock(&sbi->s_md_lock);
++			list_del(&entry->list);
++			spin_unlock(&sbi->s_md_lock);
++			kmem_cache_free(ext4_free_ext_cachep, entry);
+ 		}
+ 	}
++	/* Add the extent to active_transaction list */
++	spin_lock(&sbi->s_md_lock);
++	list_add(&new_entry->list, &sbi->s_active_transaction);
++	spin_unlock(&sbi->s_md_lock);
+ 	ext4_unlock_group(sb, group);
+ 	return 0;
+ }
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -97,22 +97,27 @@
+  */
+ #define MB_DEFAULT_GROUP_PREALLOC	512
+ 
+-#ifdef EXT4_BB_MAX_BLOCKS
+-#undef EXT4_BB_MAX_BLOCKS
+-#endif
+-#define EXT4_BB_MAX_BLOCKS	30
++struct ext4_free_data {
++	/* this links the free block information from group_info */
++	struct rb_node node;
+ 
+-struct ext4_free_metadata {
+-	ext4_group_t group;
+-	unsigned short num;
+-	ext4_grpblk_t  blocks[EXT4_BB_MAX_BLOCKS];
++	/* this links the free block information from ext4_sb_info */
+ 	struct list_head list;
++
++	/* group which free block extent belongs */
++	ext4_group_t group;
++
++	/* free block extent */
++	ext4_grpblk_t start_blk;
++	ext4_grpblk_t count;
++
++	/* transaction which freed this extent */
++	tid_t	t_tid;
+ };
+ 
+ struct ext4_group_info {
+ 	unsigned long	bb_state;
+-	unsigned long 	bb_tid;
+-	struct ext4_free_metadata *bb_md_cur;
++	struct rb_root  bb_free_root;
+ 	unsigned short	bb_first_free;
+ 	unsigned short	bb_free;
+ 	unsigned short	bb_fragments;
diff --git a/queue-2.6.27/ext4-use-ext4_group_info_need_init_bit-during-resize.patch b/queue-2.6.27/ext4-use-ext4_group_info_need_init_bit-during-resize.patch
new file mode 100644
index 00000000000..33207d96737
--- /dev/null
+++ b/queue-2.6.27/ext4-use-ext4_group_info_need_init_bit-during-resize.patch
@@ -0,0 +1,581 @@
+From tytso@mit.edu Wed Feb 18 11:09:15 2009
+From: Aneesh Kumar K.V
+Date: Tue, 17 Feb 2009 10:58:29 -0500
+Subject: ext4: Use EXT4_GROUP_INFO_NEED_INIT_BIT during resize
+To: stable@kernel.org
+Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o" , "Aneesh Kumar K.V"
+Message-ID: <1234886324-15105-10-git-send-email-tytso@mit.edu>
+
+
+From: Aneesh Kumar K.V
+
+(cherry picked from commit 920313a726e04fef0f2c0bcb04ad8229c0e700d8)
+
+The new groups added during resize are flagged as
+need_init group. Make sure we properly initialize these
+groups. When we have block size < page size and we are adding
+new groups the page may still be marked uptodate even though
+we haven't initialized the group. While forcing the init
+of buddy cache we need to make sure other groups part of the
+same page of buddy cache is not using the cache.
+group_info->alloc_sem is added to ensure the same.
+
+Signed-off-by: Aneesh Kumar K.V
+Signed-off-by: "Theodore Ts'o"
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/ext4/balloc.c  |   21 ++--
+ fs/ext4/ext4.h    |    7 -
+ fs/ext4/mballoc.c |  259 +++++++++++++++++++++++++++++++++++++++++-------------
+ fs/ext4/mballoc.h |    3 
+ fs/ext4/resize.c  |   42 +-------
+ 5 files changed, 229 insertions(+), 103 deletions(-)
+
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -868,6 +868,7 @@ void ext4_add_groupblocks(handle_t *hand
+ 	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
+ 
+ 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
++	grp = ext4_get_group_info(sb, block_group);
+ 	/*
+ 	 * Check to see if we are freeing blocks across a group
+ 	 * boundary.
+@@ -912,7 +913,11 @@ void ext4_add_groupblocks(handle_t *hand
+ 	err = ext4_journal_get_write_access(handle, gd_bh);
+ 	if (err)
+ 		goto error_return;
+-
++	/*
++	 * make sure we don't allow a parallel init on other groups in the
++	 * same buddy cache
++	 */
++	down_write(&grp->alloc_sem);
+ 	for (i = 0, blocks_freed = 0; i < count; i++) {
+ 		BUFFER_TRACE(bitmap_bh, "clear bit");
+ 		if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
+@@ -937,6 +942,13 @@ void ext4_add_groupblocks(handle_t *hand
+ 		sbi->s_flex_groups[flex_group].free_blocks += blocks_freed;
+ 		spin_unlock(sb_bgl_lock(sbi, flex_group));
+ 	}
++	/*
++	 * request to reload the buddy with the
++	 * new bitmap information
++	 */
++	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
++	ext4_mb_update_group_info(grp, blocks_freed);
++	up_write(&grp->alloc_sem);
+ 
+ 	/* We dirtied the bitmap block */
+ 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
+@@ -948,13 +960,6 @@ void ext4_add_groupblocks(handle_t *hand
+ 	if (!err)
+ 		err = ret;
+ 	sb->s_dirt = 1;
+-	/*
+-	 * request to reload the buddy with the
+-	 * new bitmap information
+-	 */
+-	grp = ext4_get_group_info(sb, block_group);
+-	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
+-	ext4_mb_update_group_info(grp, blocks_freed);
+ 
+ error_return:
+ 	brelse(bitmap_bh);
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1043,12 +1043,13 @@ extern int __init init_ext4_mballoc(void
+ extern void exit_ext4_mballoc(void);
+ extern void ext4_mb_free_blocks(handle_t *, struct inode *,
+ 		unsigned long, unsigned long, int, unsigned long *);
+-extern int ext4_mb_add_more_groupinfo(struct super_block *sb,
++extern int ext4_mb_add_groupinfo(struct super_block *sb,
+ 		ext4_group_t i, struct ext4_group_desc *desc);
+ extern void ext4_mb_update_group_info(struct ext4_group_info *grp,
+ 		ext4_grpblk_t add);
+-
+-
++extern int ext4_mb_get_buddy_cache_lock(struct super_block *, ext4_group_t);
++extern void ext4_mb_put_buddy_cache_lock(struct super_block *,
++						ext4_group_t, int);
+ /* inode.c */
+ int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
+ 		struct buffer_head *bh, ext4_fsblk_t blocknr);
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -886,18 +886,20 @@ static noinline_for_stack int
+ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
+ 					struct ext4_buddy *e4b)
+ {
+-	struct ext4_sb_info *sbi = EXT4_SB(sb);
+-	struct inode *inode = sbi->s_buddy_cache;
+ 	int blocks_per_page;
+ 	int block;
+ 	int pnum;
+ 	int poff;
+ 	struct page *page;
+ 	int ret;
++	struct ext4_group_info *grp;
++	struct ext4_sb_info *sbi = EXT4_SB(sb);
++	struct inode *inode = sbi->s_buddy_cache;
+ 
+ 	mb_debug("load group %lu\n", group);
+ 
+ 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
++	grp = ext4_get_group_info(sb, group);
+ 
+ 	e4b->bd_blkbits = sb->s_blocksize_bits;
+ 	e4b->bd_info = ext4_get_group_info(sb, group);
+@@ -905,6 +907,15 @@ ext4_mb_load_buddy(struct super_block *s
+ 	e4b->bd_group = group;
+ 	e4b->bd_buddy_page = NULL;
+ 	e4b->bd_bitmap_page = NULL;
++	e4b->alloc_semp = &grp->alloc_sem;
++
++	/* Take the read lock on the group alloc
++	 * sem. This would make sure a parallel
++	 * ext4_mb_init_group happening on other
++	 * groups mapped by the page is blocked
++	 * till we are done with allocation
++	 */
++	down_read(e4b->alloc_semp);
+ 
+ 	/*
+ 	 * the buddy cache inode stores the block bitmap
+@@ -920,6 +931,14 @@ ext4_mb_load_buddy(struct super_block *s
+ 	page = find_get_page(inode->i_mapping, pnum);
+ 	if (page == NULL || !PageUptodate(page)) {
+ 		if (page)
++			/*
++			 * drop the page reference and try
++			 * to get the page with lock. If we
++			 * are not uptodate that implies
++			 * somebody just created the page but
++			 * is yet to initialize the same. So
++			 * wait for it to initialize.
++			 */
+ 			page_cache_release(page);
+ 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
+ 		if (page) {
+@@ -985,6 +1004,9 @@ err:
+ 		page_cache_release(e4b->bd_buddy_page);
+ 	e4b->bd_buddy = NULL;
+ 	e4b->bd_bitmap = NULL;
++
++	/* Done with the buddy cache */
++	up_read(e4b->alloc_semp);
+ 	return ret;
+ }
+ 
+@@ -994,6 +1016,8 @@ static void ext4_mb_release_desc(struct
+ 		page_cache_release(e4b->bd_bitmap_page);
+ 	if (e4b->bd_buddy_page)
+ 		page_cache_release(e4b->bd_buddy_page);
++	/* Done with the buddy cache */
++	up_read(e4b->alloc_semp);
+ }
+ 
+ 
+@@ -1694,6 +1718,173 @@ static int ext4_mb_good_group(struct ext
+ 	return 0;
+ }
+ 
++/*
++ * lock the group_info alloc_sem of all the groups
++ * belonging to the same buddy cache page. This
++ * make sure other parallel operation on the buddy
++ * cache doesn't happen whild holding the buddy cache
++ * lock
++ */
++int ext4_mb_get_buddy_cache_lock(struct super_block *sb, ext4_group_t group)
++{
++	int i;
++	int block, pnum;
++	int blocks_per_page;
++	int groups_per_page;
++	ext4_group_t first_group;
++	struct ext4_group_info *grp;
++
++	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
++	/*
++	 * the buddy cache inode stores the block bitmap
++	 * and buddy information in consecutive blocks.
++	 * So for each group we need two blocks.
++	 */
++	block = group * 2;
++	pnum = block / blocks_per_page;
++	first_group = pnum * blocks_per_page / 2;
++
++	groups_per_page = blocks_per_page >> 1;
++	if (groups_per_page == 0)
++		groups_per_page = 1;
++	/* read all groups the page covers into the cache */
++	for (i = 0; i < groups_per_page; i++) {
++
++		if ((first_group + i) >= EXT4_SB(sb)->s_groups_count)
++			break;
++		grp = ext4_get_group_info(sb, first_group + i);
++		/* take all groups write allocation
++		 * semaphore. This make sure there is
++		 * no block allocation going on in any
++		 * of that groups
++		 */
++		down_write(&grp->alloc_sem);
++	}
++	return i;
++}
++
++void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
++					ext4_group_t group, int locked_group)
++{
++	int i;
++	int block, pnum;
++	int blocks_per_page;
++	ext4_group_t first_group;
++	struct ext4_group_info *grp;
++
++	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
++	/*
++	 * the buddy cache inode stores the block bitmap
++	 * and buddy information in consecutive blocks.
++	 * So for each group we need two blocks.
++	 */
++	block = group * 2;
++	pnum = block / blocks_per_page;
++	first_group = pnum * blocks_per_page / 2;
++	/* release locks on all the groups */
++	for (i = 0; i < locked_group; i++) {
++
++		grp = ext4_get_group_info(sb, first_group + i);
++		/* take all groups write allocation
++		 * semaphore. This make sure there is
++		 * no block allocation going on in any
++		 * of that groups
++		 */
++		up_write(&grp->alloc_sem);
++	}
++
++}
++
++static int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
++{
++
++	int ret;
++	void *bitmap;
++	int blocks_per_page;
++	int block, pnum, poff;
++	int num_grp_locked = 0;
++	struct ext4_group_info *this_grp;
++	struct ext4_sb_info *sbi = EXT4_SB(sb);
++	struct inode *inode = sbi->s_buddy_cache;
++	struct page *page = NULL, *bitmap_page = NULL;
++
++	mb_debug("init group %lu\n", group);
++	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
++	this_grp = ext4_get_group_info(sb, group);
++	/*
++	 * This ensures we don't add group
++	 * to this buddy cache via resize
++	 */
++	num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, group);
++	if (!EXT4_MB_GRP_NEED_INIT(this_grp)) {
++		/*
++		 * somebody initialized the group
++		 * return without doing anything
++		 */
++		ret = 0;
++		goto err;
++	}
++	/*
++	 * the buddy cache inode stores the block bitmap
++	 * and buddy information in consecutive blocks.
++	 * So for each group we need two blocks.
++	 */
++	block = group * 2;
++	pnum = block / blocks_per_page;
++	poff = block % blocks_per_page;
++	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
++	if (page) {
++		BUG_ON(page->mapping != inode->i_mapping);
++		ret = ext4_mb_init_cache(page, NULL);
++		if (ret) {
++			unlock_page(page);
++			goto err;
++		}
++		unlock_page(page);
++	}
++	if (page == NULL || !PageUptodate(page)) {
++		ret = -EIO;
++		goto err;
++	}
++	mark_page_accessed(page);
++	bitmap_page = page;
++	bitmap = page_address(page) + (poff * sb->s_blocksize);
++
++	/* init buddy cache */
++	block++;
++	pnum = block / blocks_per_page;
++	poff = block % blocks_per_page;
++	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
++	if (page == bitmap_page) {
++		/*
++		 * If both the bitmap and buddy are in
++		 * the same page we don't need to force
++		 * init the buddy
++		 */
++		unlock_page(page);
++	} else if (page) {
++		BUG_ON(page->mapping != inode->i_mapping);
++		ret = ext4_mb_init_cache(page, bitmap);
++		if (ret) {
++			unlock_page(page);
++			goto err;
++		}
++		unlock_page(page);
++	}
++	if (page == NULL || !PageUptodate(page)) {
++		ret = -EIO;
++		goto err;
++	}
++	mark_page_accessed(page);
++err:
++	ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked);
++	if (bitmap_page)
++		page_cache_release(bitmap_page);
++	if (page)
++		page_cache_release(page);
++	return ret;
++}
++
+ static noinline_for_stack int
+ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ {
+@@ -1777,7 +1968,7 @@ repeat:
+ 			group = 0;
+ 
+ 		/* quick check to skip empty groups */
+-		grp = ext4_get_group_info(ac->ac_sb, group);
++		grp = ext4_get_group_info(sb, group);
+ 		if (grp->bb_free == 0)
+ 			continue;
+ 
+@@ -1790,10 +1981,9 @@ repeat:
+ 			 * we need full data about the group
+ 			 * to make a good selection
+ 			 */
+-			err = ext4_mb_load_buddy(sb, group, &e4b);
++			err = ext4_mb_init_group(sb, group);
+ 			if (err)
+ 				goto out;
+-			ext4_mb_release_desc(&e4b);
+ 		}
+ 
+ 		/*
+@@ -2301,6 +2491,7 @@ int ext4_mb_add_groupinfo(struct super_b
+ 	}
+ 
+ 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
++	init_rwsem(&meta_group_info[i]->alloc_sem);
+ 
+ #ifdef DOUBLE_CHECK
+ 	{
+@@ -2327,54 +2518,6 @@ exit_meta_group_info:
+ } /* ext4_mb_add_groupinfo */
+ 
+ /*
+- * Add a group to the existing groups.
+- * This function is used for online resize
+- */
+-int ext4_mb_add_more_groupinfo(struct super_block *sb, ext4_group_t group,
+-				struct ext4_group_desc *desc)
+-{
+-	struct ext4_sb_info *sbi = EXT4_SB(sb);
+-	struct inode *inode = sbi->s_buddy_cache;
+-	int blocks_per_page;
+-	int block;
+-	int pnum;
+-	struct page *page;
+-	int err;
+-
+-	/* Add group based on group descriptor*/
+-	err = ext4_mb_add_groupinfo(sb, group, desc);
+-	if (err)
+-		return err;
+-
+-	/*
+-	 * Cache pages containing dynamic mb_alloc datas (buddy and bitmap
+-	 * datas) are set not up to date so that they will be re-initilaized
+-	 * during the next call to ext4_mb_load_buddy
+-	 */
+-
+-	/* Set buddy page as not up to date */
+-	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+-	block = group * 2;
+-	pnum = block / blocks_per_page;
+-	page = find_get_page(inode->i_mapping, pnum);
+-	if (page != NULL) {
+-		ClearPageUptodate(page);
+-		page_cache_release(page);
+-	}
+-
+-	/* Set bitmap page as not up to date */
+-	block++;
+-	pnum = block / blocks_per_page;
+-	page = find_get_page(inode->i_mapping, pnum);
+-	if (page != NULL) {
+-		ClearPageUptodate(page);
+-		page_cache_release(page);
+-	}
+-
+-	return 0;
+-}
+-
+-/*
+  * Update an existing group.
+  * This function is used for online resize
+  */
+@@ -4679,11 +4822,6 @@ do_more:
+ 	err = ext4_journal_get_write_access(handle, gd_bh);
+ 	if (err)
+ 		goto error_return;
+-
+-	err = ext4_mb_load_buddy(sb, block_group, &e4b);
+-	if (err)
+-		goto error_return;
+-
+ #ifdef AGGRESSIVE_CHECK
+ 	{
+ 		int i;
+@@ -4697,6 +4835,8 @@ do_more:
+ 	/* We dirtied the bitmap block */
+ 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
+ 	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
++	if (err)
++		goto error_return;
+ 
+ 	if (ac) {
+ 		ac->ac_b_ex.fe_group = block_group;
+@@ -4705,6 +4845,9 @@ do_more:
+ 		ext4_mb_store_history(ac);
+ 	}
+ 
++	err = ext4_mb_load_buddy(sb, block_group, &e4b);
++	if (err)
++		goto error_return;
+ 	if (metadata) {
+ 		/* blocks being freed are metadata. these blocks shouldn't
+ 		 * be used until this transaction is committed */
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -18,6 +18,7 @@
+ #include
+ #include
+ #include
++#include
+ #include "ext4_jbd2.h"
+ #include "ext4.h"
+ #include "group.h"
+@@ -122,6 +123,7 @@ struct ext4_group_info {
+ #ifdef DOUBLE_CHECK
+ 	void *bb_bitmap;
+ #endif
++	struct rw_semaphore alloc_sem;
+ 	unsigned short bb_counters[];
+ };
+ 
+@@ -242,6 +244,7 @@ struct ext4_buddy {
+ 	struct super_block *bd_sb;
+ 	__u16 bd_blkbits;
+ 	ext4_group_t bd_group;
++	struct rw_semaphore *alloc_semp;
+ };
+ #define EXT4_MB_BITMAP(e4b)	((e4b)->bd_bitmap)
+ #define EXT4_MB_BUDDY(e4b)	((e4b)->bd_buddy)
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -747,6 +747,7 @@ int ext4_group_add(struct super_block *s
+ 	struct inode *inode = NULL;
+ 	handle_t *handle;
+ 	int gdb_off, gdb_num;
++	int num_grp_locked = 0;
+ 	int err, err2;
+ 
+ 	gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
+@@ -787,6 +788,7 @@ int ext4_group_add(struct super_block *s
+ 		}
+ 	}
+ 
++
+ 	if ((err = verify_group_input(sb, input)))
+ 		goto exit_put;
+ 
+@@ -855,6 +857,7 @@ int ext4_group_add(struct super_block *s
+ 	 * using the new disk blocks.
+ 	 */
+ 
++	num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, input->group);
+ 	/* Update group descriptor block for new group */
+ 	gdp = (struct ext4_group_desc *)((char *)primary->b_data +
+ 					 gdb_off * EXT4_DESC_SIZE(sb));
+@@ -871,9 +874,11 @@ int ext4_group_add(struct super_block *s
+ 	 * descriptor
+ 	 */
+ 	if (test_opt(sb, MBALLOC)) {
+-		err = ext4_mb_add_more_groupinfo(sb, input->group, gdp);
+-		if (err)
++		err = ext4_mb_add_groupinfo(sb, input->group, gdp);
++		if (err) {
++			ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked);
+ 			goto exit_journal;
++		}
+ 	}
+ 	/*
+ 	 * Make the new blocks and inodes valid next. We do this before
+@@ -915,6 +920,7 @@ int ext4_group_add(struct super_block *s
+ 
+ 	/* Update the global fs size fields */
+ 	sbi->s_groups_count++;
++	ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked);
+ 
+ 	ext4_journal_dirty_metadata(handle, primary);
+ 
+@@ -1082,38 +1088,6 @@ int ext4_group_extend(struct super_block
+ 	if ((err = ext4_journal_stop(handle)))
+ 		goto exit_put;
+ 
+-	/*
+-	 * Mark mballoc pages as not up to date so that they will be updated
+-	 * next time they are loaded by ext4_mb_load_buddy.
+-	 */
+-	if (test_opt(sb, MBALLOC)) {
+-		struct ext4_sb_info *sbi = EXT4_SB(sb);
+-		struct inode *inode = sbi->s_buddy_cache;
+-		int blocks_per_page;
+-		int block;
+-		int pnum;
+-		struct page *page;
+-
+-		/* Set buddy page as not up to date */
+-		blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+-		block = group * 2;
+-		pnum = block / blocks_per_page;
+-		page = find_get_page(inode->i_mapping, pnum);
+-		if (page != NULL) {
+-			ClearPageUptodate(page);
+-			page_cache_release(page);
+-		}
+-
+-		/* Set bitmap page as not up to date */
+-		block++;
+-		pnum = block / blocks_per_page;
+-		page = find_get_page(inode->i_mapping, pnum);
+-		if (page != NULL) {
+-			ClearPageUptodate(page);
+-			page_cache_release(page);
+-		}
+-	}
+-
+ 	if (test_opt(sb, DEBUG))
+ 		printk(KERN_DEBUG "EXT4-fs: extended group to %llu blocks\n",
+ 		       ext4_blocks_count(es));
diff --git a/queue-2.6.27/ext4-use-new-buffer_head-flag-to-check-uninit-group-bitmaps-initialization.patch b/queue-2.6.27/ext4-use-new-buffer_head-flag-to-check-uninit-group-bitmaps-initialization.patch
new file mode 100644
index 00000000000..d6f43f3f730
--- /dev/null
+++ b/queue-2.6.27/ext4-use-new-buffer_head-flag-to-check-uninit-group-bitmaps-initialization.patch
@@ -0,0 +1,200 @@
+From tytso@mit.edu Wed Feb 18 11:11:52 2009
+From: Aneesh Kumar K.V
+Date: Tue, 17 Feb 2009 10:58:36 -0500
+Subject: ext4: Use new buffer_head flag to check uninit group bitmaps initialization
+To: stable@kernel.org
+Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o" , "Aneesh Kumar K.V"
+Message-ID: <1234886324-15105-17-git-send-email-tytso@mit.edu>
+
+
+From: Aneesh Kumar K.V
+
+(cherry picked from commit 2ccb5fb9f113dae969d1ae9b6c10e80fa34f8cd3)
+
+For an uninit block group, the ondisk bitmap is not initialized. That implies
+we cannot depend on the uptodate flag on the bitmap buffer_head to
+find bitmap validity. Use a new buffer_head flag which would be set after
+we properly initialize the bitmap. This also prevents reinitializing the
+uninit group bitmap every time we do an ext4_read_block_bitmap.
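As a condensed sketch of the double-checked pattern each call site below
adopts (illustrative only; the real hunks also distinguish the uninit-group
and read-submission paths and their unlocking):

	if (!bitmap_uptodate(bh)) {
		lock_buffer(bh);
		if (!bitmap_uptodate(bh)) {
			/* either initialize an uninit group's bitmap in
			 * memory or submit a read; in both cases the new
			 * flag is set while the buffer lock is held */
			set_bitmap_uptodate(bh);
		}
		unlock_buffer(bh);
	}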
+
+Signed-off-by: Aneesh Kumar K.V
+Signed-off-by: "Theodore Ts'o"
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/ext4/balloc.c  |   25 ++++++++++++++++++++++++-
+ fs/ext4/ext4.h    |   18 ++++++++++++++++++
+ fs/ext4/ialloc.c  |   24 +++++++++++++++++++++++-
+ fs/ext4/mballoc.c |   24 +++++++++++++++++++++++-
+ 4 files changed, 88 insertions(+), 3 deletions(-)
+
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -319,18 +319,41 @@ ext4_read_block_bitmap(struct super_bloc
+ 			    block_group, bitmap_blk);
+ 		return NULL;
+ 	}
+-	if (bh_uptodate_or_lock(bh))
++
++	if (bitmap_uptodate(bh))
+ 		return bh;
+ 
++	lock_buffer(bh);
++	if (bitmap_uptodate(bh)) {
++		unlock_buffer(bh);
++		return bh;
++	}
+ 	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
+ 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ 		ext4_init_block_bitmap(sb, bh, block_group, desc);
++		set_bitmap_uptodate(bh);
+ 		set_buffer_uptodate(bh);
+ 		unlock_buffer(bh);
+ 		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+ 		return bh;
+ 	}
+ 	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
++	if (buffer_uptodate(bh)) {
++		/*
++		 * if not uninit if bh is uptodate,
++		 * bitmap is also uptodate
++		 */
++		set_bitmap_uptodate(bh);
++		unlock_buffer(bh);
++		return bh;
++	}
++	/*
++	 * submit the buffer_head for read. We can
++	 * safely mark the bitmap as uptodate now.
++	 * We do it here so the bitmap uptodate bit
++	 * get set with buffer lock held.
++	 */
++	set_bitmap_uptodate(bh);
+ 	if (bh_submit_read(bh) < 0) {
+ 		put_bh(bh);
+ 		ext4_error(sb, __func__,
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -19,6 +19,7 @@
+ #include
+ #include
+ #include
++#include
+ #include "ext4_i.h"
+ 
+ /*
+@@ -1250,6 +1251,23 @@ extern int ext4_get_blocks_wrap(handle_t
+ 			sector_t block, unsigned long max_blocks,
+ 			struct buffer_head *bh, int create,
+ 			int extend_disksize, int flag);
++/*
++ * Add new method to test wether block and inode bitmaps are properly
++ * initialized. With uninit_bg reading the block from disk is not enough
++ * to mark the bitmap uptodate. We need to also zero-out the bitmap
++ */
++#define BH_BITMAP_UPTODATE BH_JBDPrivateStart
++
++static inline int bitmap_uptodate(struct buffer_head *bh)
++{
++	return (buffer_uptodate(bh) &&
++			test_bit(BH_BITMAP_UPTODATE, &(bh)->b_state));
++}
++static inline void set_bitmap_uptodate(struct buffer_head *bh)
++{
++	set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
++}
++
+ #endif	/* __KERNEL__ */
+ 
+ #endif	/* _EXT4_H */
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -115,18 +115,40 @@ ext4_read_inode_bitmap(struct super_bloc
+ 			    block_group, bitmap_blk);
+ 		return NULL;
+ 	}
+-	if (bh_uptodate_or_lock(bh))
++	if (bitmap_uptodate(bh))
+ 		return bh;
+ 
++	lock_buffer(bh);
++	if (bitmap_uptodate(bh)) {
++		unlock_buffer(bh);
++		return bh;
++	}
+ 	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
+ 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+ 		ext4_init_inode_bitmap(sb, bh, block_group, desc);
++		set_bitmap_uptodate(bh);
+ 		set_buffer_uptodate(bh);
+ 		unlock_buffer(bh);
+ 		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+ 		return bh;
+ 	}
+ 	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
++	if (buffer_uptodate(bh)) {
++		/*
++		 * if not uninit if bh is uptodate,
++		 * bitmap is also uptodate
++		 */
++		set_bitmap_uptodate(bh);
++		unlock_buffer(bh);
++		return bh;
++	}
++	/*
++	 * submit the buffer_head for read. We can
++	 * safely mark the bitmap as uptodate now.
++	 * We do it here so the bitmap uptodate bit
++	 * get set with buffer lock held.
++	 */
++	set_bitmap_uptodate(bh);
+ 	if (bh_submit_read(bh) < 0) {
+ 		put_bh(bh);
+ 		ext4_error(sb, __func__,
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -796,20 +796,42 @@ static int ext4_mb_init_cache(struct pag
+ 			if (bh[i] == NULL)
+ 				goto out;
+ 
+-			if (bh_uptodate_or_lock(bh[i]))
++			if (bitmap_uptodate(bh[i]))
+ 				continue;
+ 
++			lock_buffer(bh[i]);
++			if (bitmap_uptodate(bh[i])) {
++				unlock_buffer(bh[i]);
++				continue;
++			}
+ 			spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+ 			if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ 				ext4_init_block_bitmap(sb, bh[i],
+ 						first_group + i, desc);
++				set_bitmap_uptodate(bh[i]);
+ 				set_buffer_uptodate(bh[i]);
+ 				unlock_buffer(bh[i]);
+ 				spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+ 				continue;
+ 			}
+ 			spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
++			if (buffer_uptodate(bh[i])) {
++				/*
++				 * if not uninit if bh is uptodate,
++				 * bitmap is also uptodate
++				 */
++				set_bitmap_uptodate(bh[i]);
++				unlock_buffer(bh[i]);
++				continue;
++			}
+ 			get_bh(bh[i]);
++			/*
++			 * submit the buffer_head for read. We can
++			 * safely mark the bitmap as uptodate now.
++			 * We do it here so the bitmap uptodate bit
++			 * get set with buffer lock held.
++			 */
++			set_bitmap_uptodate(bh[i]);
+ 			bh[i]->b_end_io = end_buffer_read_sync;
+ 			submit_bh(READ, bh[i]);
+ 			mb_debug("read bitmap for group %lu\n", first_group + i);
diff --git a/queue-2.6.27/ext4-widen-type-of-ext4_sb_info.s_mb_maxs.patch b/queue-2.6.27/ext4-widen-type-of-ext4_sb_info.s_mb_maxs.patch
new file mode 100644
index 00000000000..1f42d07f050
--- /dev/null
+++ b/queue-2.6.27/ext4-widen-type-of-ext4_sb_info.s_mb_maxs.patch
@@ -0,0 +1,57 @@
+From tytso@mit.edu Wed Feb 18 11:07:37 2009
+From: Yasunori Goto
+Date: Tue, 17 Feb 2009 10:58:25 -0500
+Subject: ext4: Widen type of ext4_sb_info.s_mb_maxs[]
+To: stable@kernel.org
+Cc: Li Zefan , Yasunori Goto , linux-ext4@vger.kernel.org, "Theodore Ts'o" , Miao Xie
+Message-ID: <1234886324-15105-6-git-send-email-tytso@mit.edu>
+
+
+From: Yasunori Goto
+
+(cherry picked from commit ff7ef329b268b603ea4a2303241ef1c3829fd574)
+
+I chased the cause of the following ext4 oops report, which was
+tested on an ia64 box.
+
+http://bugzilla.kernel.org/show_bug.cgi?id=12018
+
+The cause is the size of the s_mb_maxs array that is defined as "unsigned
+short" in the ext4_sb_info structure. If the file system's block size is
+8k or greater, an unsigned short is not wide enough to contain the
+value fs->blocksize << 3.
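A minimal user-space demonstration of the truncation (assumed values: an
8KiB block has 8192 * 8 = 65536 bits, one more than USHRT_MAX, so the old
unsigned short silently wraps to 0):

	#include <stdio.h>

	int main(void)
	{
		unsigned int blocksize = 8192;            /* 8KiB blocks */
		unsigned short old_max = blocksize << 3;  /* wraps to 0 */
		unsigned int new_max = blocksize << 3;    /* 65536 */

		printf("unsigned short: %u, unsigned int: %u\n",
		       old_max, new_max);
		return 0;
	}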
+
+Signed-off-by: Yasunori Goto
+Signed-off-by: "Theodore Ts'o"
+Cc: Li Zefan
+Cc: Miao Xie
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/ext4/ext4_sb.h |    3 ++-
+ fs/ext4/mballoc.c |    2 ++
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/fs/ext4/ext4_sb.h
++++ b/fs/ext4/ext4_sb.h
+@@ -103,7 +103,8 @@ struct ext4_sb_info {
+ 	struct list_head s_committed_transaction;
+ 	spinlock_t s_md_lock;
+ 	tid_t s_last_transaction;
+-	unsigned short *s_mb_offsets, *s_mb_maxs;
++	unsigned short *s_mb_offsets;
++	unsigned int *s_mb_maxs;
+ 
+ 	/* tunables */
+ 	unsigned long s_stripe;
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2495,6 +2495,8 @@ int ext4_mb_init(struct super_block *sb,
+ 		clear_opt(sbi->s_mount_opt, MBALLOC);
+ 		return -ENOMEM;
+ 	}
++
++	i = (sb->s_blocksize_bits + 2) * sizeof(unsigned int);
+ 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
+ 	if (sbi->s_mb_maxs == NULL) {
+ 		clear_opt(sbi->s_mount_opt, MBALLOC);
diff --git a/queue-2.6.27/jbd2-add-barrier-not-supported-test-to-journal_wait_on_commit_record.patch b/queue-2.6.27/jbd2-add-barrier-not-supported-test-to-journal_wait_on_commit_record.patch
new file mode 100644
index 00000000000..e869f9832d1
--- /dev/null
+++ b/queue-2.6.27/jbd2-add-barrier-not-supported-test-to-journal_wait_on_commit_record.patch
@@ -0,0 +1,84 @@
+From tytso@mit.edu Wed Feb 18 11:08:02 2009
+From: "Theodore Ts'o"
+Date: Tue, 17 Feb 2009 10:58:26 -0500
+Subject: jbd2: Add barrier not supported test to journal_wait_on_commit_record
+To: stable@kernel.org
+Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o"
+Message-ID: <1234886324-15105-7-git-send-email-tytso@mit.edu>
+
+
+From: "Theodore Ts'o"
+
+(cherry picked from commit fd98496f467b3d26d05ab1498f41718b5ef13de5)
+
+Xen doesn't report that barriers are not supported until buffer I/O is
+reported as completed, instead of when the buffer I/O is submitted.
+Add a check and a fallback codepath to journal_wait_on_commit_record()
+to detect this case, so that attempts to mount ext4 filesystems on
+LVM/devicemapper devices on Xen guests don't blow up with an "Aborting
+journal on device XXX"; "Remounting filesystem read-only" error.
+
+Thanks to Andreas Sundstrom for reporting this issue.
+
+Signed-off-by: "Theodore Ts'o"
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/jbd2/commit.c |   27 +++++++++++++++++++++++++--
+ 1 file changed, 25 insertions(+), 2 deletions(-)
+
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -24,6 +24,7 @@
+ #include
+ #include
+ #include
++#include
+ 
+ /*
+  * Default IO end handler for temporary BJ_IO buffer_heads.
+@@ -170,12 +171,34 @@ static int journal_submit_commit_record(
+  * This function along with journal_submit_commit_record
+  * allows to write the commit record asynchronously.
+  */
+-static int journal_wait_on_commit_record(struct buffer_head *bh)
++static int journal_wait_on_commit_record(journal_t *journal,
++					 struct buffer_head *bh)
+ {
+ 	int ret = 0;
+ 
++retry:
+ 	clear_buffer_dirty(bh);
+ 	wait_on_buffer(bh);
++	if (buffer_eopnotsupp(bh) && (journal->j_flags & JBD2_BARRIER)) {
++		printk(KERN_WARNING
++		       "JBD2: wait_on_commit_record: sync failed on %s - "
++		       "disabling barriers\n", journal->j_devname);
++		spin_lock(&journal->j_state_lock);
++		journal->j_flags &= ~JBD2_BARRIER;
++		spin_unlock(&journal->j_state_lock);
++
++		lock_buffer(bh);
++		clear_buffer_dirty(bh);
++		set_buffer_uptodate(bh);
++		bh->b_end_io = journal_end_buffer_io_sync;
++
++		ret = submit_bh(WRITE_SYNC, bh);
++		if (ret) {
++			unlock_buffer(bh);
++			return ret;
++		}
++		goto retry;
++	}
+ 
+ 	if (unlikely(!buffer_uptodate(bh)))
+ 		ret = -EIO;
+@@ -795,7 +818,7 @@ wait_for_iobuf:
+ 		__jbd2_journal_abort_hard(journal);
+ 	}
+ 	if (!err && !is_journal_aborted(journal))
+-		err = journal_wait_on_commit_record(cbh);
++		err = journal_wait_on_commit_record(journal, cbh);
+ 
+ 	if (err)
+ 		jbd2_journal_abort(journal, err);
diff --git a/queue-2.6.27/jbd2-add-bh_jbdprivatestart.patch b/queue-2.6.27/jbd2-add-bh_jbdprivatestart.patch
new file mode 100644
index 00000000000..5a32bd552bb
--- /dev/null
+++ b/queue-2.6.27/jbd2-add-bh_jbdprivatestart.patch
@@ -0,0 +1,37 @@
+From tytso@mit.edu Wed Feb 18 11:11:31 2009
+From: Mark Fasheh
+Date: Tue, 17 Feb 2009 10:58:35 -0500
+Subject: jbd2: Add BH_JBDPrivateStart
+To: stable@kernel.org
+Cc: Mark Fasheh , linux-ext4@vger.kernel.org
+Message-ID: <1234886324-15105-16-git-send-email-tytso@mit.edu>
+
+
+From: Mark Fasheh
+
+(cherry picked from commit e97fcd95a4778a8caf1980c6c72fdf68185a0838)
+
+Add this so that file systems using JBD2 can safely allocate unused b_state
+bits.
+
+In this case, we add it so that Ocfs2 can define a single bit for tracking
+the validation state of a buffer.
+
+Acked-by: "Theodore Ts'o"
+Signed-off-by: Mark Fasheh
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ include/linux/jbd2.h |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -329,6 +329,7 @@ enum jbd_state_bits {
+ 	BH_State,		/* Pins most journal_head state */
+ 	BH_JournalHead,		/* Pins bh->b_private and jh->b_bh */
+ 	BH_Unshadow,		/* Dummy bit, for BJ_Shadow wakeup filtering */
++	BH_JBDPrivateStart,	/* First bit available for private use by FS */
+ };
+ 
+ BUFFER_FNS(JBD, jbd)
diff --git a/queue-2.6.27/jbd2-on-a-__journal_expect-assertion-failure-printk-jbd2-not-ext3-fs.patch b/queue-2.6.27/jbd2-on-a-__journal_expect-assertion-failure-printk-jbd2-not-ext3-fs.patch
new file mode 100644
index 00000000000..7e2eaf2bd23
--- /dev/null
+++ b/queue-2.6.27/jbd2-on-a-__journal_expect-assertion-failure-printk-jbd2-not-ext3-fs.patch
@@ -0,0 +1,35 @@
+From tytso@mit.edu Wed Feb 18 11:14:19 2009
+From: "Theodore Ts'o"
+Date: Tue, 17 Feb 2009 10:58:43 -0500
+Subject: jbd2: On a __journal_expect() assertion failure printk "JBD2", not "EXT3-fs"
+To: stable@kernel.org
+Cc: linux-ext4@vger.kernel.org, "Theodore Ts'o"
+Message-ID: <1234886324-15105-24-git-send-email-tytso@mit.edu>
+
+From: "Theodore Ts'o"
+
+(cherry picked from commit 08ec8c3878cea0bf91f2ba3c0badf44b383752d0)
+
+Otherwise it can be very confusing to find a "EXT3-fs: " failure in
+the middle of EXT4-fs failures, and it makes it harder to track the
+source of the failure.
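A standalone sketch of the macro mechanics (a GCC statement expression
mirroring the shape of __journal_expect(); the function and expression
here are hypothetical, not a real jbd2 assertion site):

	#include <stdio.h>

	#define EXPECT(expr) ({						\
		int val = (expr);					\
		if (!val)						\
			printf("JBD2 unexpected failure: %s: %s;\n",	\
			       __func__, #expr);			\
		val;							\
	})

	int main(void)
	{
		int j_free = 0;
		if (!EXPECT(j_free > 1))
			printf("recovered without a stack dump\n");
		return 0;
	}

With __func__ and #expr in the format string, the failure prints as
"JBD2 unexpected failure: main: j_free > 1;", which is enough to locate
the failing check.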
+
+Signed-off-by: "Theodore Ts'o"
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ include/linux/jbd2.h |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -308,7 +308,8 @@ void buffer_assertion_failure(struct buf
+ 		int val = (expr);					     \
+ 		if (!val) {						     \
+ 			printk(KERN_ERR					     \
+-				"EXT3-fs unexpected failure: %s;\n",# expr); \
++				"JBD2 unexpected failure: %s: %s;\n",	     \
+				__func__, #expr);			     \
+ 			printk(KERN_ERR why "\n");			     \
+ 		}							     \
+ 		val;							     \
diff --git a/queue-2.6.27/series b/queue-2.6.27/series
index a29dd32594e..422f7972888 100644
--- a/queue-2.6.27/series
+++ b/queue-2.6.27/series
@@ -10,3 +10,27 @@ btsdio-free-sk_buff-with-kfree_skb.patch
 ext2-xip-refuse-to-change-xip-flag-during-remount-with-busy-inodes.patch
 scsi-libiscsi-fix-iscsi-pool-leak.patch
 x86-cpa-make-sure-cpa-is-safe-to-call-in-lazy-mmu-mode.patch
+ext4-add-support-for-non-native-signed-unsigned-htree-hash-algorithms.patch
+ext4-tone-down-ext4_da_writepages-warnings.patch
+ext4-fix-the-delalloc-writepages-to-allocate-blocks-at-the-right-offset.patch
+ext4-avoid-ext4_error-when-mounting-a-fs-with-a-single-bg.patch
+ext4-widen-type-of-ext4_sb_info.s_mb_maxs.patch
+jbd2-add-barrier-not-supported-test-to-journal_wait_on_commit_record.patch
+ext4-don-t-overwrite-allocation_context-ac_status.patch
+ext4-add-blocks-added-during-resize-to-bitmap.patch
+ext4-use-ext4_group_info_need_init_bit-during-resize.patch
+ext4-cleanup-mballoc-header-files.patch
+ext4-use-an-rbtree-for-tracking-blocks-freed-during-transaction.patch
+ext4-don-t-use-blocks-freed-but-not-yet-committed-in-buddy-cache-init.patch
+ext4-fix-race-between-read_block_bitmap-and-mark_diskspace_used.patch
+ext4-fix-the-race-between-read_inode_bitmap-and-ext4_new_inode.patch
+jbd2-add-bh_jbdprivatestart.patch
+ext4-use-new-buffer_head-flag-to-check-uninit-group-bitmaps-initialization.patch
+ext4-mark-the-blocks-inode-bitmap-beyond-end-of-group-as-used.patch
+ext4-don-t-allow-new-groups-to-be-added-during-block-allocation.patch
+ext4-init-the-complete-page-while-building-buddy-cache.patch
+ext4-add-sanity-checks-for-the-superblock-before-mounting-the-filesystem.patch
+ext4-only-use-i_size_high-for-regular-files.patch
+ext4-add-sanity-check-to-make_indexed_dir.patch
+jbd2-on-a-__journal_expect-assertion-failure-printk-jbd2-not-ext3-fs.patch
+ext4-initialize-the-new-group-descriptor-when-resizing-the-filesystem.patch