--- /dev/null
+From d8fe29e9dea8d7d61fd140d8779326856478fc62 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fusionio.com>
+Date: Fri, 29 Mar 2013 08:09:34 -0600
+Subject: Btrfs: don't drop path when printing out tree errors in scrub
+
+From: Josef Bacik <jbacik@fusionio.com>
+
+commit d8fe29e9dea8d7d61fd140d8779326856478fc62 upstream.
+
+A user reported a panic where we were panicking somewhere in
+tree_backref_for_extent from scrub_print_warning. He only captured the trace
+but looking at scrub_print_warning we drop the path right before we mess with
+the extent buffer to print out a bunch of stuff, which isn't right. So fix this
+by dropping the path after we use the eb if we need to. Thanks,
+
+Signed-off-by: Josef Bacik <jbacik@fusionio.com>
+Signed-off-by: Chris Mason <chris.mason@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/scrub.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -541,7 +541,6 @@ static void scrub_print_warning(const ch
+ eb = path->nodes[0];
+ ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
+ item_size = btrfs_item_size_nr(eb, path->slots[0]);
+- btrfs_release_path(path);
+
+ if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ do {
+@@ -557,7 +556,9 @@ static void scrub_print_warning(const ch
+ ret < 0 ? -1 : ref_level,
+ ret < 0 ? -1 : ref_root);
+ } while (ret != 1);
++ btrfs_release_path(path);
+ } else {
++ btrfs_release_path(path);
+ swarn.path = path;
+ swarn.dev = dev;
+ iterate_extent_inodes(fs_info, found_key.objectid,
--- /dev/null
+From d9abbf1c3131b679379762700201ae69367f3f62 Mon Sep 17 00:00:00 2001
+From: Jan Schmidt <list.btrfs@jan-o-sch.net>
+Date: Wed, 20 Mar 2013 13:49:48 +0000
+Subject: Btrfs: fix locking on ROOT_REPLACE operations in tree mod log
+
+From: Jan Schmidt <list.btrfs@jan-o-sch.net>
+
+commit d9abbf1c3131b679379762700201ae69367f3f62 upstream.
+
+To resolve backrefs, ROOT_REPLACE operations in the tree mod log are
+required to be tied to at least one KEY_REMOVE_WHILE_FREEING operation.
+Therefore, those operations must be enclosed by tree_mod_log_write_lock()
+and tree_mod_log_write_unlock() calls.
+
+Those calls are private to the tree_mod_log_* functions, which means that
+removal of the elements of an old root node must be logged from
+tree_mod_log_insert_root. This partly reverts and corrects commit ba1bfbd5
+(Btrfs: fix a tree mod logging issue for root replacement operations).
+
+This fixes the brand-new version of xfstest 276 as of commit cfe73f71.
+
+Signed-off-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
+Signed-off-by: Josef Bacik <jbacik@fusionio.com>
+Signed-off-by: Chris Mason <chris.mason@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/ctree.c | 30 ++++++++++++++++++++----------
+ 1 file changed, 20 insertions(+), 10 deletions(-)
+
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -651,6 +651,8 @@ tree_mod_log_insert_root(struct btrfs_fs
+ if (tree_mod_dont_log(fs_info, NULL))
+ return 0;
+
++ __tree_mod_log_free_eb(fs_info, old_root);
++
+ ret = tree_mod_alloc(fs_info, flags, &tm);
+ if (ret < 0)
+ goto out;
+@@ -736,7 +738,7 @@ tree_mod_log_search(struct btrfs_fs_info
+ static noinline void
+ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
+ struct extent_buffer *src, unsigned long dst_offset,
+- unsigned long src_offset, int nr_items)
++ unsigned long src_offset, int nr_items, int log_removal)
+ {
+ int ret;
+ int i;
+@@ -750,10 +752,12 @@ tree_mod_log_eb_copy(struct btrfs_fs_inf
+ }
+
+ for (i = 0; i < nr_items; i++) {
+- ret = tree_mod_log_insert_key_locked(fs_info, src,
+- i + src_offset,
+- MOD_LOG_KEY_REMOVE);
+- BUG_ON(ret < 0);
++ if (log_removal) {
++ ret = tree_mod_log_insert_key_locked(fs_info, src,
++ i + src_offset,
++ MOD_LOG_KEY_REMOVE);
++ BUG_ON(ret < 0);
++ }
+ ret = tree_mod_log_insert_key_locked(fs_info, dst,
+ i + dst_offset,
+ MOD_LOG_KEY_ADD);
+@@ -927,7 +931,6 @@ static noinline int update_ref_for_cow(s
+ ret = btrfs_dec_ref(trans, root, buf, 1, 1);
+ BUG_ON(ret); /* -ENOMEM */
+ }
+- tree_mod_log_free_eb(root->fs_info, buf);
+ clean_tree_block(trans, root, buf);
+ *last_ref = 1;
+ }
+@@ -1046,6 +1049,7 @@ static noinline int __btrfs_cow_block(st
+ btrfs_set_node_ptr_generation(parent, parent_slot,
+ trans->transid);
+ btrfs_mark_buffer_dirty(parent);
++ tree_mod_log_free_eb(root->fs_info, buf);
+ btrfs_free_tree_block(trans, root, buf, parent_start,
+ last_ref);
+ }
+@@ -1755,7 +1759,6 @@ static noinline int balance_level(struct
+ goto enospc;
+ }
+
+- tree_mod_log_free_eb(root->fs_info, root->node);
+ tree_mod_log_set_root_pointer(root, child);
+ rcu_assign_pointer(root->node, child);
+
+@@ -3000,7 +3003,7 @@ static int push_node_left(struct btrfs_t
+ push_items = min(src_nritems - 8, push_items);
+
+ tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
+- push_items);
++ push_items, 1);
+ copy_extent_buffer(dst, src,
+ btrfs_node_key_ptr_offset(dst_nritems),
+ btrfs_node_key_ptr_offset(0),
+@@ -3071,7 +3074,7 @@ static int balance_node_right(struct btr
+ sizeof(struct btrfs_key_ptr));
+
+ tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
+- src_nritems - push_items, push_items);
++ src_nritems - push_items, push_items, 1);
+ copy_extent_buffer(dst, src,
+ btrfs_node_key_ptr_offset(0),
+ btrfs_node_key_ptr_offset(src_nritems - push_items),
+@@ -3223,12 +3226,18 @@ static noinline int split_node(struct bt
+ int mid;
+ int ret;
+ u32 c_nritems;
++ int tree_mod_log_removal = 1;
+
+ c = path->nodes[level];
+ WARN_ON(btrfs_header_generation(c) != trans->transid);
+ if (c == root->node) {
+ /* trying to split the root, lets make a new one */
+ ret = insert_new_root(trans, root, path, level + 1);
++ /*
++ * removal of root nodes has been logged by
++ * tree_mod_log_set_root_pointer due to locking
++ */
++ tree_mod_log_removal = 0;
+ if (ret)
+ return ret;
+ } else {
+@@ -3266,7 +3275,8 @@ static noinline int split_node(struct bt
+ (unsigned long)btrfs_header_chunk_tree_uuid(split),
+ BTRFS_UUID_SIZE);
+
+- tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
++ tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid,
++ tree_mod_log_removal);
+ copy_extent_buffer(split, c,
+ btrfs_node_key_ptr_offset(0),
+ btrfs_node_key_ptr_offset(mid),
--- /dev/null
+From 4adaa611020fa6ac65b0ac8db78276af4ec04e63 Mon Sep 17 00:00:00 2001
+From: Chris Mason <chris.mason@fusionio.com>
+Date: Tue, 26 Mar 2013 13:07:00 -0400
+Subject: Btrfs: fix race between mmap writes and compression
+
+From: Chris Mason <chris.mason@fusionio.com>
+
+commit 4adaa611020fa6ac65b0ac8db78276af4ec04e63 upstream.
+
+Btrfs uses page_mkwrite to ensure stable pages during
+crc calculations and mmap workloads. We call clear_page_dirty_for_io
+before we do any crcs, and this forces any application with the file
+mapped to wait for the crc to finish before it is allowed to change
+the file.
+
+With compression on, the clear_page_dirty_for_io step is happening after
+we've compressed the pages. This means the applications might be
+changing the pages while we are compressing them, and some of those
+modifications might not hit the disk.
+
+This commit adds the clear_page_dirty_for_io before compression starts
+and makes sure to redirty the page if we have to fallback to
+uncompressed IO as well.
+
+Signed-off-by: Chris Mason <chris.mason@fusionio.com>
+Reported-by: Alexandre Oliva <oliva@gnu.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/extent_io.c | 33 +++++++++++++++++++++++++++++++++
+ fs/btrfs/extent_io.h | 2 ++
+ fs/btrfs/inode.c | 14 ++++++++++++++
+ 3 files changed, 49 insertions(+)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1258,6 +1258,39 @@ int unlock_extent(struct extent_io_tree
+ GFP_NOFS);
+ }
+
++int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
++{
++ unsigned long index = start >> PAGE_CACHE_SHIFT;
++ unsigned long end_index = end >> PAGE_CACHE_SHIFT;
++ struct page *page;
++
++ while (index <= end_index) {
++ page = find_get_page(inode->i_mapping, index);
++ BUG_ON(!page); /* Pages should be in the extent_io_tree */
++ clear_page_dirty_for_io(page);
++ page_cache_release(page);
++ index++;
++ }
++ return 0;
++}
++
++int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
++{
++ unsigned long index = start >> PAGE_CACHE_SHIFT;
++ unsigned long end_index = end >> PAGE_CACHE_SHIFT;
++ struct page *page;
++
++ while (index <= end_index) {
++ page = find_get_page(inode->i_mapping, index);
++ BUG_ON(!page); /* Pages should be in the extent_io_tree */
++ account_page_redirty(page);
++ __set_page_dirty_nobuffers(page);
++ page_cache_release(page);
++ index++;
++ }
++ return 0;
++}
++
+ /*
+ * helper function to set both pages and extents in the tree writeback
+ */
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -329,6 +329,8 @@ int map_private_extent_buffer(struct ext
+ unsigned long *map_len);
+ int extent_range_uptodate(struct extent_io_tree *tree,
+ u64 start, u64 end);
++int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
++int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
+ int extent_clear_unlock_delalloc(struct inode *inode,
+ struct extent_io_tree *tree,
+ u64 start, u64 end, struct page *locked_page,
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -352,6 +352,7 @@ static noinline int compress_file_range(
+ int i;
+ int will_compress;
+ int compress_type = root->fs_info->compress_type;
++ int redirty = 0;
+
+ /* if this is a small write inside eof, kick off a defrag */
+ if ((end - start + 1) < 16 * 1024 &&
+@@ -414,6 +415,17 @@ again:
+ if (BTRFS_I(inode)->force_compress)
+ compress_type = BTRFS_I(inode)->force_compress;
+
++ /*
++ * we need to call clear_page_dirty_for_io on each
++ * page in the range. Otherwise applications with the file
++ * mmap'd can wander in and change the page contents while
++ * we are compressing them.
++ *
++ * If the compression fails for any reason, we set the pages
++ * dirty again later on.
++ */
++ extent_range_clear_dirty_for_io(inode, start, end);
++ redirty = 1;
+ ret = btrfs_compress_pages(compress_type,
+ inode->i_mapping, start,
+ total_compressed, pages,
+@@ -555,6 +567,8 @@ cleanup_and_bail_uncompressed:
+ __set_page_dirty_nobuffers(locked_page);
+ /* unlocked later on in the async handlers */
+ }
++ if (redirty)
++ extent_range_redirty_for_io(inode, start, end);
+ add_async_extent(async_cow, start, end - start + 1,
+ 0, NULL, 0, BTRFS_COMPRESS_NONE);
+ *num_added += 1;
--- /dev/null
+From fdf30d1c1b386e1b73116cc7e0fb14e962b763b0 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fusionio.com>
+Date: Tue, 26 Mar 2013 15:31:45 -0400
+Subject: Btrfs: limit the global reserve to 512mb
+
+From: Josef Bacik <jbacik@fusionio.com>
+
+commit fdf30d1c1b386e1b73116cc7e0fb14e962b763b0 upstream.
+
+A user reported a problem where he was getting early ENOSPC with hundreds of
+gigs of free data space and 6 gigs of free metadata space. This is because the
+global block reserve was taking up the entire free metadata space. This is
+ridiculous, we have infrastructure in place to throttle if we start using too
+much of the global reserve, so instead of letting it get this huge just limit it
+to 512mb so that users can still get work done. This allowed the user to
+complete his rsync without issues. Thanks
+
+Reported-and-tested-by: Stefan Priebe <s.priebe@profihost.ag>
+Signed-off-by: Josef Bacik <jbacik@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/extent-tree.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4308,7 +4308,7 @@ static void update_global_block_rsv(stru
+ spin_lock(&sinfo->lock);
+ spin_lock(&block_rsv->lock);
+
+- block_rsv->size = num_bytes;
++ block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
+
+ num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
+ sinfo->bytes_reserved + sinfo->bytes_readonly +
--- /dev/null
+From 9bf7a4890518186238d2579be16ecc5190a707c0 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fusionio.com>
+Date: Fri, 1 Mar 2013 13:35:47 -0500
+Subject: Btrfs: use set_nlink if our i_nlink is 0
+
+From: Josef Bacik <jbacik@fusionio.com>
+
+commit 9bf7a4890518186238d2579be16ecc5190a707c0 upstream.
+
+We need to inc the nlink of deleted entries when running replay so we can do the
+unlink on the fs_root and get everything cleaned up and then have the orphan
+cleanup do the right thing. The problem is inc_nlink complains about this, even
+though it still does the right thing. So use set_nlink() if our i_nlink is 0
+to keep users from seeing the warnings during log replay. Thanks,
+
+Signed-off-by: Josef Bacik <jbacik@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/tree-log.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1384,7 +1384,10 @@ static noinline int link_to_fixup_dir(st
+
+ btrfs_release_path(path);
+ if (ret == 0) {
+- btrfs_inc_nlink(inode);
++ if (!inode->i_nlink)
++ set_nlink(inode, 1);
++ else
++ btrfs_inc_nlink(inode);
+ ret = btrfs_update_inode(trans, root, inode);
+ } else if (ret == -EEXIST) {
+ ret = 0;
vfs-carefully-propogate-mounts-across-user-namespaces.patch
ipc-restrict-mounting-the-mqueue-filesystem.patch
userns-restrict-when-proc-and-sysfs-can-be-mounted.patch
+btrfs-use-set_nlink-if-our-i_nlink-is-0.patch
+btrfs-fix-locking-on-root_replace-operations-in-tree-mod-log.patch
+btrfs-fix-race-between-mmap-writes-and-compression.patch
+btrfs-limit-the-global-reserve-to-512mb.patch
+btrfs-don-t-drop-path-when-printing-out-tree-errors-in-scrub.patch