--- /dev/null
+From f5a44db5d2d677dfbf12deee461f85e9ec633961 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Fri, 20 Dec 2013 09:29:35 -0500
+Subject: ext4: add explicit casts when masking cluster sizes
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit f5a44db5d2d677dfbf12deee461f85e9ec633961 upstream.
+
+The missing casts can cause the high 64-bits of the physical blocks to
+be lost. Set up new macros which allow us to make sure the right
+thing happens, even if at some point we end up supporting larger
+logical block numbers.
+
+Thanks to the Emese Revfy and the PaX security team for reporting this
+issue.
+
+Reported-by: PaX Team <pageexec@freemail.hu>
+Reported-by: Emese Revfy <re.emese@gmail.com>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/ext4.h | 10 ++++++++++
+ fs/ext4/extents.c | 24 +++++++++++-------------
+ fs/ext4/mballoc.c | 6 +++---
+ 3 files changed, 24 insertions(+), 16 deletions(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -280,6 +280,16 @@ struct ext4_io_submit {
+ /* Translate # of blks to # of clusters */
+ #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \
+ (sbi)->s_cluster_bits)
++/* Mask out the low bits to get the starting block of the cluster */
++#define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \
++ ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
++#define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \
++ ~((ext4_lblk_t) (s)->s_cluster_ratio - 1))
++/* Get the cluster offset */
++#define EXT4_PBLK_COFF(s, pblk) ((pblk) & \
++ ((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
++#define EXT4_LBLK_COFF(s, lblk) ((lblk) & \
++ ((ext4_lblk_t) (s)->s_cluster_ratio - 1))
+
+ /*
+ * Structure of a blocks group descriptor
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1772,8 +1772,7 @@ static unsigned int ext4_ext_check_overl
+ depth = ext_depth(inode);
+ if (!path[depth].p_ext)
+ goto out;
+- b2 = le32_to_cpu(path[depth].p_ext->ee_block);
+- b2 &= ~(sbi->s_cluster_ratio - 1);
++ b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
+
+ /*
+ * get the next allocated block if the extent in the path
+@@ -1783,7 +1782,7 @@ static unsigned int ext4_ext_check_overl
+ b2 = ext4_ext_next_allocated_block(path);
+ if (b2 == EXT_MAX_BLOCKS)
+ goto out;
+- b2 &= ~(sbi->s_cluster_ratio - 1);
++ b2 = EXT4_LBLK_CMASK(sbi, b2);
+ }
+
+ /* check for wrap through zero on extent logical start block*/
+@@ -2444,7 +2443,7 @@ static int ext4_remove_blocks(handle_t *
+ * truncate operation has removed all of the blocks in
+ * the cluster.
+ */
+- if (pblk & (sbi->s_cluster_ratio - 1) &&
++ if (EXT4_PBLK_COFF(sbi, pblk) &&
+ (ee_len == num))
+ *partial_cluster = EXT4_B2C(sbi, pblk);
+ else
+@@ -3675,7 +3674,7 @@ int ext4_find_delalloc_cluster(struct in
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ ext4_lblk_t lblk_start, lblk_end;
+- lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
++ lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
+ lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
+
+ return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
+@@ -3734,9 +3733,9 @@ get_reserved_cluster_alloc(struct inode
+ trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
+
+ /* Check towards left side */
+- c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
++ c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
+ if (c_offset) {
+- lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
++ lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
+ lblk_to = lblk_from + c_offset - 1;
+
+ if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
+@@ -3744,7 +3743,7 @@ get_reserved_cluster_alloc(struct inode
+ }
+
+ /* Now check towards right. */
+- c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
++ c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
+ if (allocated_clusters && c_offset) {
+ lblk_from = lblk_start + num_blks;
+ lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
+@@ -3952,7 +3951,7 @@ static int get_implied_cluster_alloc(str
+ struct ext4_ext_path *path)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+- ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
++ ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
+ ext4_lblk_t ex_cluster_start, ex_cluster_end;
+ ext4_lblk_t rr_cluster_start;
+ ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
+@@ -3970,8 +3969,7 @@ static int get_implied_cluster_alloc(str
+ (rr_cluster_start == ex_cluster_start)) {
+ if (rr_cluster_start == ex_cluster_end)
+ ee_start += ee_len - 1;
+- map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
+- c_offset;
++ map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
+ map->m_len = min(map->m_len,
+ (unsigned) sbi->s_cluster_ratio - c_offset);
+ /*
+@@ -4125,7 +4123,7 @@ int ext4_ext_map_blocks(handle_t *handle
+ */
+ map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
+ newex.ee_block = cpu_to_le32(map->m_lblk);
+- cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
++	cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
+
+ /*
+ * If we are doing bigalloc, check to see if the extent returned
+@@ -4193,7 +4191,7 @@ int ext4_ext_map_blocks(handle_t *handle
+ * needed so that future calls to get_implied_cluster_alloc()
+ * work correctly.
+ */
+- offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
++ offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
+ ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
+ ar.goal -= offset;
+ ar.logical -= offset;
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4107,7 +4107,7 @@ ext4_mb_initialize_context(struct ext4_a
+ ext4_get_group_no_and_offset(sb, goal, &group, &block);
+
+ /* set up allocation goals */
+- ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
++ ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
+ ac->ac_status = AC_STATUS_CONTINUE;
+ ac->ac_sb = sb;
+ ac->ac_inode = ar->inode;
+@@ -4644,7 +4644,7 @@ void ext4_free_blocks(handle_t *handle,
+ * blocks at the beginning or the end unless we are explicitly
+ * requested to avoid doing so.
+ */
+- overflow = block & (sbi->s_cluster_ratio - 1);
++ overflow = EXT4_PBLK_COFF(sbi, block);
+ if (overflow) {
+ if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
+ overflow = sbi->s_cluster_ratio - overflow;
+@@ -4658,7 +4658,7 @@ void ext4_free_blocks(handle_t *handle,
+ count += overflow;
+ }
+ }
+- overflow = count & (sbi->s_cluster_ratio - 1);
++ overflow = EXT4_LBLK_COFF(sbi, count);
+ if (overflow) {
+ if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
+ if (count > overflow)
--- /dev/null
+From ae1495b12df1897d4f42842a7aa7276d920f6290 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Mon, 2 Dec 2013 09:31:36 -0500
+Subject: ext4: call ext4_error_inode() if jbd2_journal_dirty_metadata() fails
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit ae1495b12df1897d4f42842a7aa7276d920f6290 upstream.
+
+While it's true that errors can only happen if there is a bug in
+jbd2_journal_dirty_metadata(), if a bug does happen, we need to halt
+the kernel or remount the file system read-only in order to avoid
+further data loss. The ext4_journal_abort_handle() function doesn't
+do any of this, and while it's likely that this call (since it doesn't
+adjust refcounts) will result in the file system eventually
+deadlocking since the current transaction will never be able to close,
+it's much cleaner to let ext4's error handling system deal with
+this situation.
+
+There's a separate bug here which is that if certain jbd2 errors
+occur and the file system is mounted errors=continue, the file
+system will probably eventually grind to a halt as described
+above. But things have been this way for a long time, and usually when
+we have these sorts of errors it's pretty much a disaster --- and
+that's why the jbd2 layer aggressively retries memory allocations,
+which is the most likely cause of these jbd2 errors.
+
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/ext4_jbd2.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/fs/ext4/ext4_jbd2.c
++++ b/fs/ext4/ext4_jbd2.c
+@@ -223,6 +223,15 @@ int __ext4_handle_dirty_metadata(const c
+ if (WARN_ON_ONCE(err)) {
+ ext4_journal_abort_handle(where, line, __func__, bh,
+ handle, err);
++ ext4_error_inode(inode, where, line,
++ bh->b_blocknr,
++ "journal_dirty_metadata failed: "
++ "handle type %u started at line %u, "
++ "credits %u/%u, errcode %d",
++ handle->h_type,
++ handle->h_line_no,
++ handle->h_requested_credits,
++ handle->h_buffer_credits, err);
+ }
+ } else {
+ if (inode)
--- /dev/null
+From 5946d089379a35dda0e531710b48fca05446a196 Mon Sep 17 00:00:00 2001
+From: Eryu Guan <guaneryu@gmail.com>
+Date: Tue, 3 Dec 2013 21:22:21 -0500
+Subject: ext4: check for overlapping extents in ext4_valid_extent_entries()
+
+From: Eryu Guan <guaneryu@gmail.com>
+
+commit 5946d089379a35dda0e531710b48fca05446a196 upstream.
+
+A corrupted ext4 may have out of order leaf extents, i.e.
+
+extent: lblk 0--1023, len 1024, pblk 9217, flags: LEAF UNINIT
+extent: lblk 1000--2047, len 1024, pblk 10241, flags: LEAF UNINIT
+ ^^^^ overlap with previous extent
+
+Reading such extent could hit BUG_ON() in ext4_es_cache_extent().
+
+ BUG_ON(end < lblk);
+
+The problem is that __read_extent_tree_block() tries to cache holes as
+well but assumes 'lblk' is greater than 'prev' and passes underflowed
+length to ext4_es_cache_extent(). Fix it by checking for overlapping
+extents in ext4_valid_extent_entries().
+
+I hit this when fuzz testing ext4, and am able to reproduce it by
+modifying the on-disk extent by hand.
+
+Also add the check for (ee_block + len - 1) in ext4_valid_extent() to
+make sure the value is not overflow.
+
+Ran xfstests on patched ext4 and no regression.
+
+Cc: Lukáš Czerner <lczerner@redhat.com>
+Signed-off-by: Eryu Guan <guaneryu@gmail.com>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/extents.c | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -360,8 +360,10 @@ static int ext4_valid_extent(struct inod
+ {
+ ext4_fsblk_t block = ext4_ext_pblock(ext);
+ int len = ext4_ext_get_actual_len(ext);
++ ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
++ ext4_lblk_t last = lblock + len - 1;
+
+- if (len == 0)
++ if (lblock > last)
+ return 0;
+ return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
+ }
+@@ -387,11 +389,26 @@ static int ext4_valid_extent_entries(str
+ if (depth == 0) {
+ /* leaf entries */
+ struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
++ struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
++ ext4_fsblk_t pblock = 0;
++ ext4_lblk_t lblock = 0;
++ ext4_lblk_t prev = 0;
++ int len = 0;
+ while (entries) {
+ if (!ext4_valid_extent(inode, ext))
+ return 0;
++
++ /* Check for overlapping extents */
++ lblock = le32_to_cpu(ext->ee_block);
++ len = ext4_ext_get_actual_len(ext);
++ if ((lblock <= prev) && prev) {
++ pblock = ext4_ext_pblock(ext);
++ es->s_last_error_block = cpu_to_le64(pblock);
++ return 0;
++ }
+ ext++;
+ entries--;
++ prev = lblock + len - 1;
+ }
+ } else {
+ struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
--- /dev/null
+From 30fac0f75da24dd5bb43c9e911d2039a984ac815 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Sun, 8 Dec 2013 21:11:59 -0500
+Subject: ext4: Do not reserve clusters when fs doesn't support extents
+
+From: Jan Kara <jack@suse.cz>
+
+commit 30fac0f75da24dd5bb43c9e911d2039a984ac815 upstream.
+
+When the filesystem doesn't support extents (like in ext2/3
+compatibility modes), there is no need to reserve any clusters. Space
+estimates for writing are exact, hole punching doesn't need new
+metadata, and there are no unwritten extents to convert.
+
+This fixes a problem where a filesystem still having some free space when
+accessed with a native ext2/3 driver suddenly reports ENOSPC when
+accessed with an ext4 driver.
+
+Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Tested-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Reviewed-by: Lukas Czerner <lczerner@redhat.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/super.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3213,11 +3213,19 @@ int ext4_calculate_overhead(struct super
+ }
+
+
+-static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
++static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb)
+ {
+ ext4_fsblk_t resv_clusters;
+
+ /*
++ * There's no need to reserve anything when we aren't using extents.
++ * The space estimates are exact, there are no unwritten extents,
++ * hole punching doesn't need new metadata... This is needed especially
++ * to keep ext2/3 backward compatibility.
++ */
++ if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
++ return 0;
++ /*
+ * By default we reserve 2% or 4096 clusters, whichever is smaller.
+ * This should cover the situations where we can not afford to run
+ * out of space like for example punch hole, or converting
+@@ -3225,7 +3233,8 @@ static ext4_fsblk_t ext4_calculate_resv_
+ * allocation would require 1, or 2 blocks, higher numbers are
+ * very rare.
+ */
+- resv_clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
++ resv_clusters = ext4_blocks_count(EXT4_SB(sb)->s_es) >>
++ EXT4_SB(sb)->s_cluster_bits;
+
+ do_div(resv_clusters, 50);
+ resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
+@@ -3969,10 +3978,10 @@ no_journal:
+ "available");
+ }
+
+- err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sbi));
++ err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sb));
+ if (err) {
+ ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
+- "reserved pool", ext4_calculate_resv_clusters(sbi));
++ "reserved pool", ext4_calculate_resv_clusters(sb));
+ goto failed_mount4a;
+ }
+
--- /dev/null
+From 34cf865d54813aab3497838132fb1bbd293f4054 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Wed, 18 Dec 2013 00:44:44 -0500
+Subject: ext4: fix deadlock when writing in ENOSPC conditions
+
+From: Jan Kara <jack@suse.cz>
+
+commit 34cf865d54813aab3497838132fb1bbd293f4054 upstream.
+
+Akira-san has been reporting rare deadlocks of his machine when running
+xfstests test 269 on ext4 filesystem. The problem turned out to be in
+ext4_da_reserve_metadata() and ext4_da_reserve_space() which called
+ext4_should_retry_alloc() while holding i_data_sem. Since
+ext4_should_retry_alloc() can force a transaction commit, this is a
+lock ordering violation and leads to deadlocks.
+
+Fix the problem by just removing the retry loops. These functions should
+just report ENOSPC to the caller (e.g. ext4_da_write_begin()) and that
+function must take care of retrying after dropping all necessary locks.
+
+Reported-and-tested-by: Akira Fujita <a-fujita@rs.jp.nec.com>
+Reviewed-by: Zheng Liu <wenqing.lz@taobao.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/inode.c | 12 ------------
+ 1 file changed, 12 deletions(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1263,7 +1263,6 @@ static int ext4_journalled_write_end(str
+ */
+ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
+ {
+- int retries = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned int md_needed;
+@@ -1275,7 +1274,6 @@ static int ext4_da_reserve_metadata(stru
+ * in order to allocate nrblocks
+ * worse case is one extent per block
+ */
+-repeat:
+ spin_lock(&ei->i_block_reservation_lock);
+ /*
+ * ext4_calc_metadata_amount() has side effects, which we have
+@@ -1295,10 +1293,6 @@ repeat:
+ ei->i_da_metadata_calc_len = save_len;
+ ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+ spin_unlock(&ei->i_block_reservation_lock);
+- if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
+- cond_resched();
+- goto repeat;
+- }
+ return -ENOSPC;
+ }
+ ei->i_reserved_meta_blocks += md_needed;
+@@ -1312,7 +1306,6 @@ repeat:
+ */
+ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
+ {
+- int retries = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned int md_needed;
+@@ -1334,7 +1327,6 @@ static int ext4_da_reserve_space(struct
+ * in order to allocate nrblocks
+ * worse case is one extent per block
+ */
+-repeat:
+ spin_lock(&ei->i_block_reservation_lock);
+ /*
+ * ext4_calc_metadata_amount() has side effects, which we have
+@@ -1354,10 +1346,6 @@ repeat:
+ ei->i_da_metadata_calc_len = save_len;
+ ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+ spin_unlock(&ei->i_block_reservation_lock);
+- if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
+- cond_resched();
+- goto repeat;
+- }
+ dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
+ return -ENOSPC;
+ }
--- /dev/null
+From 8f9ff189205a6817aee5a1f996f876541f86e07c Mon Sep 17 00:00:00 2001
+From: Lukas Czerner <lczerner@redhat.com>
+Date: Wed, 30 Oct 2013 11:10:52 -0400
+Subject: ext4: fix FITRIM in no journal mode
+
+From: Lukas Czerner <lczerner@redhat.com>
+
+commit 8f9ff189205a6817aee5a1f996f876541f86e07c upstream.
+
+When using FITRIM ioctl on a file system without journal it will
+only trim the block group once, no matter how many times you invoke
+FITRIM ioctl and how many block you release from the block group.
+
+It is because we only clear EXT4_GROUP_INFO_WAS_TRIMMED_BIT in journal
+callback. Fix this by clearing the bit in no journal mode as well.
+
+Signed-off-by: Lukas Czerner <lczerner@redhat.com>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Reported-by: Jorge Fábregas <jorge.fabregas@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/mballoc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4771,8 +4771,8 @@ do_more:
+ " group:%d block:%d count:%lu failed"
+ " with %d", block_group, bit, count,
+ err);
+- }
+-
++ } else
++ EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
+
+ ext4_lock_group(sb, block_group);
+ mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
--- /dev/null
+From 4e8d2139802ce4f41936a687f06c560b12115247 Mon Sep 17 00:00:00 2001
+From: Junho Ryu <jayr@google.com>
+Date: Tue, 3 Dec 2013 18:10:28 -0500
+Subject: ext4: fix use-after-free in ext4_mb_new_blocks
+
+From: Junho Ryu <jayr@google.com>
+
+commit 4e8d2139802ce4f41936a687f06c560b12115247 upstream.
+
+ext4_mb_put_pa should hold pa->pa_lock before accessing pa->pa_count.
+While ext4_mb_use_preallocated checks pa->pa_deleted first and then
+increments pa->count later, ext4_mb_put_pa decrements pa->pa_count
+before holding pa->pa_lock and then sets pa->pa_deleted.
+
+* Free sequence
+ext4_mb_put_pa (1): atomic_dec_and_test pa->pa_count
+ext4_mb_put_pa (2): lock pa->pa_lock
+ext4_mb_put_pa (3): check pa->pa_deleted
+ext4_mb_put_pa (4): set pa->pa_deleted=1
+ext4_mb_put_pa (5): unlock pa->pa_lock
+ext4_mb_put_pa (6): remove pa from a list
+ext4_mb_pa_callback: free pa
+
+* Use sequence
+ext4_mb_use_preallocated (1): iterate over preallocation
+ext4_mb_use_preallocated (2): lock pa->pa_lock
+ext4_mb_use_preallocated (3): check pa->pa_deleted
+ext4_mb_use_preallocated (4): increase pa->pa_count
+ext4_mb_use_preallocated (5): unlock pa->pa_lock
+ext4_mb_release_context: access pa
+
+* Use-after-free sequence
+[initial status] <pa->pa_deleted = 0, pa_count = 1>
+ext4_mb_use_preallocated (1): iterate over preallocation
+ext4_mb_use_preallocated (2): lock pa->pa_lock
+ext4_mb_use_preallocated (3): check pa->pa_deleted
+ext4_mb_put_pa (1): atomic_dec_and_test pa->pa_count
+[pa_count decremented] <pa->pa_deleted = 0, pa_count = 0>
+ext4_mb_use_preallocated (4): increase pa->pa_count
+[pa_count incremented] <pa->pa_deleted = 0, pa_count = 1>
+ext4_mb_use_preallocated (5): unlock pa->pa_lock
+ext4_mb_put_pa (2): lock pa->pa_lock
+ext4_mb_put_pa (3): check pa->pa_deleted
+ext4_mb_put_pa (4): set pa->pa_deleted=1
+[race condition!] <pa->pa_deleted = 1, pa_count = 1>
+ext4_mb_put_pa (5): unlock pa->pa_lock
+ext4_mb_put_pa (6): remove pa from a list
+ext4_mb_pa_callback: free pa
+ext4_mb_release_context: access pa
+
+AddressSanitizer has detected use-after-free in ext4_mb_new_blocks
+Bug report: http://goo.gl/rG1On3
+
+Signed-off-by: Junho Ryu <jayr@google.com>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/mballoc.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -3423,6 +3423,9 @@ static void ext4_mb_pa_callback(struct r
+ {
+ struct ext4_prealloc_space *pa;
+ pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
++
++ BUG_ON(atomic_read(&pa->pa_count));
++ BUG_ON(pa->pa_deleted == 0);
+ kmem_cache_free(ext4_pspace_cachep, pa);
+ }
+
+@@ -3436,11 +3439,13 @@ static void ext4_mb_put_pa(struct ext4_a
+ ext4_group_t grp;
+ ext4_fsblk_t grp_blk;
+
+- if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
+- return;
+-
+ /* in this short window concurrent discard can set pa_deleted */
+ spin_lock(&pa->pa_lock);
++ if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
++ spin_unlock(&pa->pa_lock);
++ return;
++ }
++
+ if (pa->pa_deleted == 1) {
+ spin_unlock(&pa->pa_lock);
+ return;
arm64-spinlock-retry-trylock-operation-if-strex-fails-on-free-lock.patch
arm-omap2-hwmod_data-fix-missing-omap_intc_start-in-irq-data.patch
x86-idle-repair-large-server-50-watt-idle-power-regression.patch
+ext4-call-ext4_error_inode-if-jbd2_journal_dirty_metadata-fails.patch
+ext4-fix-use-after-free-in-ext4_mb_new_blocks.patch
+ext4-check-for-overlapping-extents-in-ext4_valid_extent_entries.patch
+ext4-do-not-reserve-clusters-when-fs-doesn-t-support-extents.patch
+ext4-fix-deadlock-when-writing-in-enospc-conditions.patch
+ext4-add-explicit-casts-when-masking-cluster-sizes.patch
+ext4-fix-fitrim-in-no-journal-mode.patch