--- /dev/null
+From f8567a3845ac05bb28f3c1b478ef752762bd39ef Mon Sep 17 00:00:00 2001
+From: Benjamin LaHaise <bcrl@kvack.org>
+Date: Tue, 24 Jun 2014 13:12:55 -0400
+Subject: aio: fix aio request leak when events are reaped by userspace
+
+From: Benjamin LaHaise <bcrl@kvack.org>
+
+commit f8567a3845ac05bb28f3c1b478ef752762bd39ef upstream.
+
+The aio cleanups and optimizations by kmo that were merged into the 3.10
+tree added a regression for userspace event reaping. Specifically, the
+reference counts are not decremented if the event is reaped in userspace,
+leading to the application being unable to submit further aio requests.
+This patch applies to 3.12+. A separate backport is required for 3.10/3.11.
+This issue was uncovered as part of CVE-2014-0206.
+
+Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
+Cc: Kent Overstreet <kmo@daterainc.com>
+Cc: Mateusz Guzik <mguzik@redhat.com>
+Cc: Petr Matousek <pmatouse@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1021,6 +1021,7 @@ void aio_complete(struct kiocb *iocb, lo
+
+ /* everything turned out well, dispose of the aiocb. */
+ kiocb_free(iocb);
++ put_reqs_available(ctx, 1);
+
+ /*
+ * We have to order our ring_info tail store above and test
+@@ -1100,8 +1101,6 @@ static long aio_read_events_ring(struct
+ flush_dcache_page(ctx->ring_pages[0]);
+
+ pr_debug("%li h%u t%u\n", ret, head, tail);
+-
+- put_reqs_available(ctx, ret);
+ out:
+ mutex_unlock(&ctx->ring_lock);
+
--- /dev/null
+From edfbbf388f293d70bf4b7c0bc38774d05e6f711a Mon Sep 17 00:00:00 2001
+From: Benjamin LaHaise <bcrl@kvack.org>
+Date: Tue, 24 Jun 2014 13:32:51 -0400
+Subject: aio: fix kernel memory disclosure in io_getevents() introduced in v3.10
+
+From: Benjamin LaHaise <bcrl@kvack.org>
+
+commit edfbbf388f293d70bf4b7c0bc38774d05e6f711a upstream.
+
+A kernel memory disclosure was introduced in aio_read_events_ring() in v3.10
+by commit a31ad380bed817aa25f8830ad23e1a0480fef797. The changes made to
+aio_read_events_ring() failed to correctly limit the index into
+ctx->ring_pages[], allowing an attacker to cause the subsequent kmap() of
+an arbitrary page with a copy_to_user() to copy the contents into userspace.
+This vulnerability has been assigned CVE-2014-0206. Thanks to Mateusz and
+Petr for disclosing this issue.
+
+This patch applies to v3.12+. A separate backport is needed for 3.10/3.11.
+
+Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
+Cc: Mateusz Guzik <mguzik@redhat.com>
+Cc: Petr Matousek <pmatouse@redhat.com>
+Cc: Kent Overstreet <kmo@daterainc.com>
+Cc: Jeff Moyer <jmoyer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1063,6 +1063,9 @@ static long aio_read_events_ring(struct
+ if (head == tail)
+ goto out;
+
++ head %= ctx->nr_events;
++ tail %= ctx->nr_events;
++
+ while (ret < nr) {
+ long avail;
+ struct io_event *ev;
--- /dev/null
+From e47043aea3853a74a9aa5726a1faa916d7462ab7 Mon Sep 17 00:00:00 2001
+From: Jason Cooper <jason@lakedaemon.net>
+Date: Wed, 4 Jun 2014 13:41:20 +0000
+Subject: ARM: mvebu: DT: fix OpenBlocks AX3-4 RAM size
+
+From: Jason Cooper <jason@lakedaemon.net>
+
+commit e47043aea3853a74a9aa5726a1faa916d7462ab7 upstream.
+
+The OpenBlocks AX3-4 has a non-DT bootloader. It also comes with 1GB of
+soldered on RAM, and a DIMM slot for expansion.
+
+Unfortunately, atags_to_fdt() doesn't work in big-endian mode, so we see
+the following failure when attempting to boot a big-endian kernel:
+
+ 686 slab pages
+ 17 pages shared
+ 0 pages swap cached
+ [ pid ] uid tgid total_vm rss nr_ptes swapents oom_score_adj name
+ Kernel panic - not syncing: Out of memory and no killable processes...
+
+ CPU: 1 PID: 351 Comm: kworker/u4:0 Not tainted 3.15.0-rc8-next-20140603 #1
+ [<c0215a54>] (unwind_backtrace) from [<c021160c>] (show_stack+0x10/0x14)
+ [<c021160c>] (show_stack) from [<c0802500>] (dump_stack+0x78/0x94)
+ [<c0802500>] (dump_stack) from [<c0800068>] (panic+0x90/0x21c)
+ [<c0800068>] (panic) from [<c02b5704>] (out_of_memory+0x320/0x340)
+ [<c02b5704>] (out_of_memory) from [<c02b93a0>] (__alloc_pages_nodemask+0x874/0x930)
+ [<c02b93a0>] (__alloc_pages_nodemask) from [<c02d446c>] (handle_mm_fault+0x744/0x96c)
+ [<c02d446c>] (handle_mm_fault) from [<c02cf250>] (__get_user_pages+0xd0/0x4c0)
+ [<c02cf250>] (__get_user_pages) from [<c02f3598>] (get_arg_page+0x54/0xbc)
+ [<c02f3598>] (get_arg_page) from [<c02f3878>] (copy_strings+0x278/0x29c)
+ [<c02f3878>] (copy_strings) from [<c02f38bc>] (copy_strings_kernel+0x20/0x28)
+ [<c02f38bc>] (copy_strings_kernel) from [<c02f4f1c>] (do_execve+0x3a8/0x4c8)
+ [<c02f4f1c>] (do_execve) from [<c025ac10>] (____call_usermodehelper+0x15c/0x194)
+ [<c025ac10>] (____call_usermodehelper) from [<c020e9b8>] (ret_from_fork+0x14/0x3c)
+ CPU0: stopping
+ CPU: 0 PID: 0 Comm: swapper/0 Not tainted 3.15.0-rc8-next-20140603 #1
+ [<c0215a54>] (unwind_backtrace) from [<c021160c>] (show_stack+0x10/0x14)
+ [<c021160c>] (show_stack) from [<c0802500>] (dump_stack+0x78/0x94)
+ [<c0802500>] (dump_stack) from [<c021429c>] (handle_IPI+0x138/0x174)
+ [<c021429c>] (handle_IPI) from [<c02087f0>] (armada_370_xp_handle_irq+0xb0/0xcc)
+ [<c02087f0>] (armada_370_xp_handle_irq) from [<c0212100>] (__irq_svc+0x40/0x50)
+ Exception stack(0xc0b6bf68 to 0xc0b6bfb0)
+ bf60: e9fad598 00000000 00f509a3 00000000 c0b6a000 c0b724c4
+ bf80: c0b72458 c0b6a000 00000000 00000000 c0b66da0 c0b6a000 00000000 c0b6bfb0
+ bfa0: c027bb94 c027bb24 60000313 ffffffff
+ [<c0212100>] (__irq_svc) from [<c027bb24>] (cpu_startup_entry+0x54/0x214)
+ [<c027bb24>] (cpu_startup_entry) from [<c0ac5b30>] (start_kernel+0x318/0x37c)
+ [<c0ac5b30>] (start_kernel) from [<00208078>] (0x208078)
+ ---[ end Kernel panic - not syncing: Out of memory and no killable processes...
+
+A similar failure will also occur if ARM_ATAG_DTB_COMPAT isn't selected.
+
+Fix this by setting a sane default (1 GB) in the dts file.
+
+Signed-off-by: Jason Cooper <jason@lakedaemon.net>
+Tested-by: Kevin Hilman <khilman@linaro.org>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
++++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+@@ -25,7 +25,7 @@
+
+ memory {
+ device_type = "memory";
+- reg = <0 0x00000000 0 0xC0000000>; /* 3 GB */
++ reg = <0 0x00000000 0 0x40000000>; /* 1 GB soldered on */
+ };
+
+ soc {
--- /dev/null
+From f3a183cb422574014538017b5b291a416396f97e Mon Sep 17 00:00:00 2001
+From: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
+Date: Fri, 6 Jun 2014 23:07:16 +0100
+Subject: arm64/dma: Removing ARCH_HAS_DMA_GET_REQUIRED_MASK macro
+
+From: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
+
+commit f3a183cb422574014538017b5b291a416396f97e upstream.
+
+Arm64 does not define dma_get_required_mask() function.
+Therefore, it should not define the ARCH_HAS_DMA_GET_REQUIRED_MASK.
+This causes build errors in some device drivers (e.g. mpt2sas)
+
+Signed-off-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/dma-mapping.h | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/arch/arm64/include/asm/dma-mapping.h
++++ b/arch/arm64/include/asm/dma-mapping.h
+@@ -26,8 +26,6 @@
+ #include <xen/xen.h>
+ #include <asm/xen/hypervisor.h>
+
+-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+-
+ #define DMA_ERROR_CODE (~(dma_addr_t)0)
+ extern struct dma_map_ops *dma_ops;
+ extern struct dma_map_ops coherent_swiotlb_dma_ops;
--- /dev/null
+From e3a920afc3482e954834a4ed95908c4bc5e4c000 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Wed, 18 Jun 2014 14:06:27 +0100
+Subject: arm64: mm: remove broken &= operator from pmd_mknotpresent
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit e3a920afc3482e954834a4ed95908c4bc5e4c000 upstream.
+
+This should be a plain old '&' and could easily lead to undefined
+behaviour if the target of a pmd_mknotpresent invocation was the same
+as the parameter.
+
+Fixes: 9c7e535fcc17 (arm64: mm: Route pmd thp functions through pte equivalents)
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/pgtable.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -253,7 +253,7 @@ static inline pmd_t pte_pmd(pte_t pte)
+ #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+ #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+ #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+-#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) &= ~PMD_TYPE_MASK))
++#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
+
+ #define __HAVE_ARCH_PMD_WRITE
+ #define pmd_write(pmd) pte_write(pmd_pte(pmd))
--- /dev/null
+From 34c65c43f1518bf85f93526ad373adc6a683b4c5 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 2 Jun 2014 11:47:29 +0100
+Subject: arm64: uid16: fix __kernel_old_{gid,uid}_t definitions
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 34c65c43f1518bf85f93526ad373adc6a683b4c5 upstream.
+
+Whilst native arm64 applications don't have the 16-bit UID/GID syscalls
+wired up, compat tasks can still access them. The 16-bit wrappers for
+these syscalls use __kernel_old_uid_t and __kernel_old_gid_t, which must
+be 16-bit data types to maintain compatibility with the 16-bit UIDs used
+by compat applications.
+
+This patch defines 16-bit __kernel_old_{gid,uid}_t types for arm64
+instead of using the 32-bit types provided by asm-generic.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/Kbuild | 1 -
+ arch/arm64/include/uapi/asm/posix_types.h | 10 ++++++++++
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/Kbuild
++++ b/arch/arm64/include/asm/Kbuild
+@@ -30,7 +30,6 @@ generic-y += msgbuf.h
+ generic-y += mutex.h
+ generic-y += pci.h
+ generic-y += poll.h
+-generic-y += posix_types.h
+ generic-y += preempt.h
+ generic-y += resource.h
+ generic-y += rwsem.h
+--- /dev/null
++++ b/arch/arm64/include/uapi/asm/posix_types.h
+@@ -0,0 +1,10 @@
++#ifndef __ASM_POSIX_TYPES_H
++#define __ASM_POSIX_TYPES_H
++
++typedef unsigned short __kernel_old_uid_t;
++typedef unsigned short __kernel_old_gid_t;
++#define __kernel_old_uid_t __kernel_old_uid_t
++
++#include <asm-generic/posix_types.h>
++
++#endif /* __ASM_POSIX_TYPES_H */
--- /dev/null
+From 5a1972bd9fd4b2fb1bac8b7a0b636d633d8717e3 Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <quwenruo@cn.fujitsu.com>
+Date: Wed, 16 Apr 2014 17:02:32 +0800
+Subject: btrfs: Add ctime/mtime update for btrfs device add/remove.
+
+From: Qu Wenruo <quwenruo@cn.fujitsu.com>
+
+commit 5a1972bd9fd4b2fb1bac8b7a0b636d633d8717e3 upstream.
+
+Btrfs will send uevent to udev inform the device change,
+but ctime/mtime for the block device inode is not updated, which causes
+libblkid used by btrfs-progs to be unable to detect the device change and
+use its old cache, causing 'btrfs dev scan; btrfs dev remove; btrfs dev
+scan' to give an error message.
+
+Reported-by: Tsutomu Itoh <t-itoh@jp.fujitsu.com>
+Cc: Karel Zak <kzak@redhat.com>
+Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/volumes.c | 26 ++++++++++++++++++++++++--
+ 1 file changed, 24 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1452,6 +1452,22 @@ out:
+ return ret;
+ }
+
++/*
++ * Function to update ctime/mtime for a given device path.
++ * Mainly used for ctime/mtime based probe like libblkid.
++ */
++static void update_dev_time(char *path_name)
++{
++ struct file *filp;
++
++ filp = filp_open(path_name, O_RDWR, 0);
++ if (!filp)
++ return;
++ file_update_time(filp);
++ filp_close(filp, NULL);
++ return;
++}
++
+ static int btrfs_rm_dev_item(struct btrfs_root *root,
+ struct btrfs_device *device)
+ {
+@@ -1704,10 +1720,14 @@ int btrfs_rm_device(struct btrfs_root *r
+
+ ret = 0;
+
+- /* Notify udev that device has changed */
+- if (bdev)
++ if (bdev) {
++ /* Notify udev that device has changed */
+ btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
+
++ /* Update ctime/mtime for device path for libblkid */
++ update_dev_time(device_path);
++ }
++
+ error_brelse:
+ brelse(bh);
+ if (bdev)
+@@ -2146,6 +2166,8 @@ int btrfs_init_new_device(struct btrfs_r
+ ret = btrfs_commit_transaction(trans, root);
+ }
+
++ /* Update ctime/mtime for libblkid */
++ update_dev_time(device_path);
+ return ret;
+
+ error_trans:
--- /dev/null
+From c1895442be01c58449e3bf9272f22062a670e08f Mon Sep 17 00:00:00 2001
+From: Jeff Mahoney <jeffm@suse.com>
+Date: Tue, 27 May 2014 12:59:57 -0400
+Subject: btrfs: allocate raid type kobjects dynamically
+
+From: Jeff Mahoney <jeffm@suse.com>
+
+commit c1895442be01c58449e3bf9272f22062a670e08f upstream.
+
+We are currently allocating space_info objects in an array when we
+allocate space_info. When a user does something like:
+
+# btrfs balance start -mconvert=raid1 -dconvert=raid1 /mnt
+# btrfs balance start -mconvert=single -dconvert=single /mnt -f
+# btrfs balance start -mconvert=raid1 -dconvert=raid1 /
+
+We can end up with memory corruption since the kobject hasn't
+been reinitialized properly and the name pointer was left set.
+
+The rationale behind allocating them statically was to avoid
+creating a separate kobject container that just contained the
+raid type. It used the index in the array to determine the index.
+
+Ultimately, though, this wastes more memory than it saves in all
+but the most complex scenarios and introduces kobject lifetime
+questions.
+
+This patch allocates the kobjects dynamically instead. Note that
+we also remove the kobject_get/put of the parent kobject since
+kobject_add and kobject_del do that internally.
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+Reported-by: David Sterba <dsterba@suse.cz>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/ctree.h | 8 +++++++-
+ fs/btrfs/extent-tree.c | 39 ++++++++++++++++++++++++++-------------
+ fs/btrfs/sysfs.c | 5 +++--
+ 3 files changed, 36 insertions(+), 16 deletions(-)
+
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -1113,6 +1113,12 @@ struct btrfs_qgroup_limit_item {
+ __le64 rsv_excl;
+ } __attribute__ ((__packed__));
+
++/* For raid type sysfs entries */
++struct raid_kobject {
++ int raid_type;
++ struct kobject kobj;
++};
++
+ struct btrfs_space_info {
+ spinlock_t lock;
+
+@@ -1163,7 +1169,7 @@ struct btrfs_space_info {
+ wait_queue_head_t wait;
+
+ struct kobject kobj;
+- struct kobject block_group_kobjs[BTRFS_NR_RAID_TYPES];
++ struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];
+ };
+
+ #define BTRFS_BLOCK_RSV_GLOBAL 1
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3401,10 +3401,8 @@ static int update_space_info(struct btrf
+ return ret;
+ }
+
+- for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
++ for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
+ INIT_LIST_HEAD(&found->block_groups[i]);
+- kobject_init(&found->block_group_kobjs[i], &btrfs_raid_ktype);
+- }
+ init_rwsem(&found->groups_sem);
+ spin_lock_init(&found->lock);
+ found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
+@@ -8327,8 +8325,9 @@ int btrfs_free_block_groups(struct btrfs
+ list_del(&space_info->list);
+ for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
+ struct kobject *kobj;
+- kobj = &space_info->block_group_kobjs[i];
+- if (kobj->parent) {
++ kobj = space_info->block_group_kobjs[i];
++ space_info->block_group_kobjs[i] = NULL;
++ if (kobj) {
+ kobject_del(kobj);
+ kobject_put(kobj);
+ }
+@@ -8352,17 +8351,26 @@ static void __link_block_group(struct bt
+ up_write(&space_info->groups_sem);
+
+ if (first) {
+- struct kobject *kobj = &space_info->block_group_kobjs[index];
++ struct raid_kobject *rkobj;
+ int ret;
+
+- kobject_get(&space_info->kobj); /* put in release */
+- ret = kobject_add(kobj, &space_info->kobj, "%s",
+- get_raid_name(index));
++ rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
++ if (!rkobj)
++ goto out_err;
++ rkobj->raid_type = index;
++ kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
++ ret = kobject_add(&rkobj->kobj, &space_info->kobj,
++ "%s", get_raid_name(index));
+ if (ret) {
+- pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
+- kobject_put(&space_info->kobj);
++ kobject_put(&rkobj->kobj);
++ goto out_err;
+ }
++ space_info->block_group_kobjs[index] = &rkobj->kobj;
+ }
++
++ return;
++out_err:
++ pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
+ }
+
+ static struct btrfs_block_group_cache *
+@@ -8697,6 +8705,7 @@ int btrfs_remove_block_group(struct btrf
+ struct btrfs_root *tree_root = root->fs_info->tree_root;
+ struct btrfs_key key;
+ struct inode *inode;
++ struct kobject *kobj = NULL;
+ int ret;
+ int index;
+ int factor;
+@@ -8796,11 +8805,15 @@ int btrfs_remove_block_group(struct btrf
+ */
+ list_del_init(&block_group->list);
+ if (list_empty(&block_group->space_info->block_groups[index])) {
+- kobject_del(&block_group->space_info->block_group_kobjs[index]);
+- kobject_put(&block_group->space_info->block_group_kobjs[index]);
++ kobj = block_group->space_info->block_group_kobjs[index];
++ block_group->space_info->block_group_kobjs[index] = NULL;
+ clear_avail_alloc_bits(root->fs_info, block_group->flags);
+ }
+ up_write(&block_group->space_info->groups_sem);
++ if (kobj) {
++ kobject_del(kobj);
++ kobject_put(kobj);
++ }
+
+ if (block_group->cached == BTRFS_CACHE_STARTED)
+ wait_block_group_cache_done(block_group);
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -254,6 +254,7 @@ static ssize_t global_rsv_reserved_show(
+ BTRFS_ATTR(global_rsv_reserved, 0444, global_rsv_reserved_show);
+
+ #define to_space_info(_kobj) container_of(_kobj, struct btrfs_space_info, kobj)
++#define to_raid_kobj(_kobj) container_of(_kobj, struct raid_kobject, kobj)
+
+ static ssize_t raid_bytes_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf);
+@@ -266,7 +267,7 @@ static ssize_t raid_bytes_show(struct ko
+ {
+ struct btrfs_space_info *sinfo = to_space_info(kobj->parent);
+ struct btrfs_block_group_cache *block_group;
+- int index = kobj - sinfo->block_group_kobjs;
++ int index = to_raid_kobj(kobj)->raid_type;
+ u64 val = 0;
+
+ down_read(&sinfo->groups_sem);
+@@ -288,7 +289,7 @@ static struct attribute *raid_attributes
+
+ static void release_raid_kobj(struct kobject *kobj)
+ {
+- kobject_put(kobj->parent);
++ kfree(to_raid_kobj(kobj));
+ }
+
+ struct kobj_type btrfs_raid_ktype = {
--- /dev/null
+From 8a56457f5f8fa7c2698ffae8545214c5b96a2cb5 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fb.com>
+Date: Thu, 5 Jun 2014 16:08:45 -0400
+Subject: Btrfs: don't check nodes for extent items
+
+From: Josef Bacik <jbacik@fb.com>
+
+commit 8a56457f5f8fa7c2698ffae8545214c5b96a2cb5 upstream.
+
+The backref code was looking at nodes as well as leaves when we tried to
+populate extent item entries. This is not good, and although we get away with it
+for the most part because we'd skip where disk_bytenr != random_memory,
+sometimes random_memory would match and suddenly boom. This fixes that problem.
+Thanks,
+
+Signed-off-by: Josef Bacik <jbacik@fb.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/backref.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -984,11 +984,12 @@ again:
+ goto out;
+ }
+ if (ref->count && ref->parent) {
+- if (extent_item_pos && !ref->inode_list) {
++ if (extent_item_pos && !ref->inode_list &&
++ ref->level == 0) {
+ u32 bsz;
+ struct extent_buffer *eb;
+ bsz = btrfs_level_size(fs_info->extent_root,
+- info_level);
++ ref->level);
+ eb = read_tree_block(fs_info->extent_root,
+ ref->parent, bsz, 0);
+ if (!eb || !extent_buffer_uptodate(eb)) {
--- /dev/null
+From 7d78874273463a784759916fc3e0b4e2eb141c70 Mon Sep 17 00:00:00 2001
+From: Chris Mason <clm@fb.com>
+Date: Wed, 21 May 2014 05:49:54 -0700
+Subject: Btrfs: fix double free in find_lock_delalloc_range
+
+From: Chris Mason <clm@fb.com>
+
+commit 7d78874273463a784759916fc3e0b4e2eb141c70 upstream.
+
+We need to NULL the cached_state after freeing it, otherwise
+we might free it again if find_delalloc_range doesn't find anything.
+
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/extent_io.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1693,6 +1693,7 @@ again:
+ * shortening the size of the delalloc range we're searching
+ */
+ free_extent_state(cached_state);
++ cached_state = NULL;
+ if (!loops) {
+ max_bytes = PAGE_CACHE_SIZE;
+ loops = 1;
--- /dev/null
+From fc19c5e73645f95d3eca12b4e91e7b56faf1e4a4 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@gmail.com>
+Date: Tue, 29 Apr 2014 13:18:40 +0100
+Subject: Btrfs: fix leaf corruption caused by ENOSPC while hole punching
+
+From: Filipe Manana <fdmanana@gmail.com>
+
+commit fc19c5e73645f95d3eca12b4e91e7b56faf1e4a4 upstream.
+
+While running a stress test with multiple threads writing to the same btrfs
+file system, I ended up with a situation where a leaf was corrupted in that
+it had 2 file extent item keys that had the same exact key. I was able to
+detect this quickly thanks to the following patch which triggers an assertion
+as soon as a leaf is marked dirty if there are duplicated keys or out of order
+keys:
+
+ Btrfs: check if items are ordered when a leaf is marked dirty
+ (https://patchwork.kernel.org/patch/3955431/)
+
+Basically while running the test, I got the following in dmesg:
+
+ [28877.415877] WARNING: CPU: 2 PID: 10706 at fs/btrfs/file.c:553 btrfs_drop_extent_cache+0x435/0x440 [btrfs]()
+ (...)
+ [28877.415917] Call Trace:
+ [28877.415922] [<ffffffff816f1189>] dump_stack+0x4e/0x68
+ [28877.415926] [<ffffffff8104a32c>] warn_slowpath_common+0x8c/0xc0
+ [28877.415929] [<ffffffff8104a37a>] warn_slowpath_null+0x1a/0x20
+ [28877.415944] [<ffffffffa03775a5>] btrfs_drop_extent_cache+0x435/0x440 [btrfs]
+ [28877.415949] [<ffffffff8118e7be>] ? kmem_cache_alloc+0xfe/0x1c0
+ [28877.415962] [<ffffffffa03777d9>] fill_holes+0x229/0x3e0 [btrfs]
+ [28877.415972] [<ffffffffa0345865>] ? block_rsv_add_bytes+0x55/0x80 [btrfs]
+ [28877.415984] [<ffffffffa03792cb>] btrfs_fallocate+0xb6b/0xc20 [btrfs]
+ (...)
+ [29854.132560] BTRFS critical (device sdc): corrupt leaf, bad key order: block=955232256,root=1, slot=24
+ [29854.132565] BTRFS info (device sdc): leaf 955232256 total ptrs 40 free space 778
+ (...)
+ [29854.132637] item 23 key (3486 108 667648) itemoff 2694 itemsize 53
+ [29854.132638] extent data disk bytenr 14574411776 nr 286720
+ [29854.132639] extent data offset 0 nr 286720 ram 286720
+ [29854.132640] item 24 key (3486 108 954368) itemoff 2641 itemsize 53
+ [29854.132641] extent data disk bytenr 0 nr 0
+ [29854.132643] extent data offset 0 nr 0 ram 0
+ [29854.132644] item 25 key (3486 108 954368) itemoff 2588 itemsize 53
+ [29854.132645] extent data disk bytenr 8699670528 nr 77824
+ [29854.132646] extent data offset 0 nr 77824 ram 77824
+ [29854.132647] item 26 key (3486 108 1146880) itemoff 2535 itemsize 53
+ [29854.132648] extent data disk bytenr 8699670528 nr 77824
+ [29854.132649] extent data offset 0 nr 77824 ram 77824
+ (...)
+ [29854.132707] kernel BUG at fs/btrfs/ctree.h:3901!
+ (...)
+ [29854.132771] Call Trace:
+ [29854.132779] [<ffffffffa0342b5c>] setup_items_for_insert+0x2dc/0x400 [btrfs]
+ [29854.132791] [<ffffffffa0378537>] __btrfs_drop_extents+0xba7/0xdd0 [btrfs]
+ [29854.132794] [<ffffffff8109c0d6>] ? trace_hardirqs_on_caller+0x16/0x1d0
+ [29854.132797] [<ffffffff8109c29d>] ? trace_hardirqs_on+0xd/0x10
+ [29854.132800] [<ffffffff8118e7be>] ? kmem_cache_alloc+0xfe/0x1c0
+ [29854.132810] [<ffffffffa036783b>] insert_reserved_file_extent.constprop.66+0xab/0x310 [btrfs]
+ [29854.132820] [<ffffffffa036a6c6>] __btrfs_prealloc_file_range+0x116/0x340 [btrfs]
+ [29854.132830] [<ffffffffa0374d53>] btrfs_prealloc_file_range+0x23/0x30 [btrfs]
+ (...)
+
+So this is caused by getting an -ENOSPC error while punching a file hole, more
+specifically, we get -ENOSPC error from __btrfs_drop_extents in the while loop
+of file.c:btrfs_punch_hole() when it's unable to modify the btree to delete one
+or more file extent items due to lack of enough free space. When this happens,
+in btrfs_punch_hole(), we attempt to reclaim free space by switching our transaction
+block reservation object to root->fs_info->trans_block_rsv, end our transaction and
+start a new transaction basically - and, we keep increasing our current offset
+(cur_offset) as long as it's smaller than the end of the target range (lockend) -
+this makes us leave the loop with cur_offset == drop_end which in turn makes us
+call fill_holes() for inserting a file extent item that represents a 0 bytes range
+hole (and this insertion succeeds, as in the meanwhile more space became available).
+
+This 0 bytes file hole extent item is a problem because any subsequent caller of
+__btrfs_drop_extents (regular file writes, or fallocate calls for e.g.), with a
+start file offset that is equal to the offset of the hole, will not remove this
+extent item due to the following conditional in the while loop of
+__btrfs_drop_extents:
+
+ if (extent_end <= search_start) {
+ path->slots[0]++;
+ goto next_slot;
+ }
+
+This later makes the call to setup_items_for_insert() (at the very end of
+__btrfs_drop_extents), insert a new file extent item with the same offset as
+the 0 bytes file hole extent item that follows it. Needless is to say that this
+causes chaos, either when reading the leaf from disk (btree_readpage_end_io_hook),
+where we perform leaf sanity checks or in subsequent operations that manipulate
+file extent items, as in the fallocate call as shown by the dmesg trace above.
+
+Without my other patch to perform the leaf sanity checks once a leaf is marked
+as dirty (if the integrity checker is enabled), it would have been much harder
+to debug this issue.
+
+This change might fix a few similar issues reported by users in the mailing
+list regarding assertion failures in btrfs_set_item_key_safe calls performed
+by __btrfs_drop_extents, such as the following report:
+
+ http://comments.gmane.org/gmane.comp.file-systems.btrfs/32938
+
+Asking fill_holes() to create a 0 bytes wide file hole item also produced the
+first warning in the trace above, as we passed a range to btrfs_drop_extent_cache
+that has an end smaller (by -1) than its start.
+
+On 3.14 kernels this issue manifests itself through leaf corruption, as we get
+duplicated file extent item keys in a leaf when calling setup_items_for_insert(),
+but on older kernels, setup_items_for_insert() isn't called by __btrfs_drop_extents(),
+instead we have callers of __btrfs_drop_extents(), namely the functions
+inode.c:insert_inline_extent() and inode.c:insert_reserved_file_extent(), calling
+btrfs_insert_empty_item() to insert the new file extent item, which would fail with
+error -EEXIST, instead of inserting a duplicated key - which is still a serious
+issue as it would make all similar file extent item replace operations keep
+failing if they target the same file range.
+
+Signed-off-by: Filipe David Borba Manana <fdmanana@gmail.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/file.c | 20 +++++++++++++++++++-
+ 1 file changed, 19 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -780,6 +780,18 @@ next_slot:
+ extent_end = search_start;
+ }
+
++ /*
++ * Don't skip extent items representing 0 byte lengths. They
++ * used to be created (bug) if while punching holes we hit
++ * -ENOSPC condition. So if we find one here, just ensure we
++ * delete it, otherwise we would insert a new file extent item
++ * with the same key (offset) as that 0 bytes length file
++ * extent item in the call to setup_items_for_insert() later
++ * in this function.
++ */
++ if (extent_end == key.offset && extent_end >= search_start)
++ goto delete_extent_item;
++
+ if (extent_end <= search_start) {
+ path->slots[0]++;
+ goto next_slot;
+@@ -893,6 +905,7 @@ next_slot:
+ * | ------ extent ------ |
+ */
+ if (start <= key.offset && end >= extent_end) {
++delete_extent_item:
+ if (del_nr == 0) {
+ del_slot = path->slots[0];
+ del_nr = 1;
+@@ -2347,7 +2360,12 @@ static int btrfs_punch_hole(struct inode
+ }
+
+ trans->block_rsv = &root->fs_info->trans_block_rsv;
+- if (cur_offset < ino_size) {
++ /*
++ * Don't insert file hole extent item if it's for a range beyond eof
++ * (because it's useless) or if it represents a 0 bytes range (when
++ * cur_offset == drop_end).
++ */
++ if (cur_offset < ino_size && cur_offset < drop_end) {
+ ret = fill_holes(trans, inode, path, cur_offset, drop_end);
+ if (ret) {
+ err = ret;
--- /dev/null
+From 29cc83f69c8338ff8fd1383c9be263d4bdf52d73 Mon Sep 17 00:00:00 2001
+From: Liu Bo <bo.li.liu@oracle.com>
+Date: Sun, 11 May 2014 23:14:59 +0800
+Subject: Btrfs: fix NULL pointer crash of deleting a seed device
+
+From: Liu Bo <bo.li.liu@oracle.com>
+
+commit 29cc83f69c8338ff8fd1383c9be263d4bdf52d73 upstream.
+
+Same as normal devices, seed devices should be initialized with
+fs_info->dev_root as well, otherwise we'll get a NULL pointer crash.
+
+Cc: Chris Murphy <lists@colorremedies.com>
+Reported-by: Chris Murphy <lists@colorremedies.com>
+Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/volumes.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -6080,10 +6080,14 @@ void btrfs_init_devices_late(struct btrf
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ struct btrfs_device *device;
+
+- mutex_lock(&fs_devices->device_list_mutex);
+- list_for_each_entry(device, &fs_devices->devices, dev_list)
+- device->dev_root = fs_info->dev_root;
+- mutex_unlock(&fs_devices->device_list_mutex);
++ while (fs_devices) {
++ mutex_lock(&fs_devices->device_list_mutex);
++ list_for_each_entry(device, &fs_devices->devices, dev_list)
++ device->dev_root = fs_info->dev_root;
++ mutex_unlock(&fs_devices->device_list_mutex);
++
++ fs_devices = fs_devices->seed;
++ }
+ }
+
+ static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
--- /dev/null
+From 6eda71d0c030af0fc2f68aaa676e6d445600855b Mon Sep 17 00:00:00 2001
+From: Liu Bo <bo.li.liu@oracle.com>
+Date: Mon, 9 Jun 2014 10:54:07 +0800
+Subject: Btrfs: fix scrub_print_warning to handle skinny metadata extents
+
+From: Liu Bo <bo.li.liu@oracle.com>
+
+commit 6eda71d0c030af0fc2f68aaa676e6d445600855b upstream.
+
+The skinny extents are interpreted incorrectly in scrub_print_warning(),
+and end up hitting the BUG() in btrfs_extent_inline_ref_size.
+
+Reported-by: Konstantinos Skarlatos <k.skarlatos@gmail.com>
+Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/backref.c | 30 +++++++++++++++++++-----------
+ fs/btrfs/backref.h | 4 ++--
+ fs/btrfs/scrub.c | 5 +++--
+ 3 files changed, 24 insertions(+), 15 deletions(-)
+
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1405,9 +1405,10 @@ int extent_from_logical(struct btrfs_fs_
+ * returns <0 on error
+ */
+ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
+- struct btrfs_extent_item *ei, u32 item_size,
+- struct btrfs_extent_inline_ref **out_eiref,
+- int *out_type)
++ struct btrfs_key *key,
++ struct btrfs_extent_item *ei, u32 item_size,
++ struct btrfs_extent_inline_ref **out_eiref,
++ int *out_type)
+ {
+ unsigned long end;
+ u64 flags;
+@@ -1417,9 +1418,16 @@ static int __get_extent_inline_ref(unsig
+ /* first call */
+ flags = btrfs_extent_flags(eb, ei);
+ if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+- info = (struct btrfs_tree_block_info *)(ei + 1);
+- *out_eiref =
+- (struct btrfs_extent_inline_ref *)(info + 1);
++ if (key->type == BTRFS_METADATA_ITEM_KEY) {
++ /* a skinny metadata extent */
++ *out_eiref =
++ (struct btrfs_extent_inline_ref *)(ei + 1);
++ } else {
++ WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
++ info = (struct btrfs_tree_block_info *)(ei + 1);
++ *out_eiref =
++ (struct btrfs_extent_inline_ref *)(info + 1);
++ }
+ } else {
+ *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
+ }
+@@ -1429,7 +1437,7 @@ static int __get_extent_inline_ref(unsig
+ }
+
+ end = (unsigned long)ei + item_size;
+- *out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
++ *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
+ *out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);
+
+ *ptr += btrfs_extent_inline_ref_size(*out_type);
+@@ -1448,8 +1456,8 @@ static int __get_extent_inline_ref(unsig
+ * <0 on error.
+ */
+ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
+- struct btrfs_extent_item *ei, u32 item_size,
+- u64 *out_root, u8 *out_level)
++ struct btrfs_key *key, struct btrfs_extent_item *ei,
++ u32 item_size, u64 *out_root, u8 *out_level)
+ {
+ int ret;
+ int type;
+@@ -1460,8 +1468,8 @@ int tree_backref_for_extent(unsigned lon
+ return 1;
+
+ while (1) {
+- ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
+- &eiref, &type);
++ ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size,
++ &eiref, &type);
+ if (ret < 0)
+ return ret;
+
+--- a/fs/btrfs/backref.h
++++ b/fs/btrfs/backref.h
+@@ -40,8 +40,8 @@ int extent_from_logical(struct btrfs_fs_
+ u64 *flags);
+
+ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
+- struct btrfs_extent_item *ei, u32 item_size,
+- u64 *out_root, u8 *out_level);
++ struct btrfs_key *key, struct btrfs_extent_item *ei,
++ u32 item_size, u64 *out_root, u8 *out_level);
+
+ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
+ u64 extent_item_objectid,
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -588,8 +588,9 @@ static void scrub_print_warning(const ch
+
+ if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ do {
+- ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
+- &ref_root, &ref_level);
++ ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
++ item_size, &ref_root,
++ &ref_level);
+ printk_in_rcu(KERN_WARNING
+ "BTRFS: %s at logical %llu on dev %s, "
+ "sector %llu: metadata %s (level %d) in tree "
--- /dev/null
+From 3e2426bd0eb980648449e7a2f5a23e3cd3c7725c Mon Sep 17 00:00:00 2001
+From: Eric Sandeen <sandeen@redhat.com>
+Date: Thu, 12 Jun 2014 00:39:58 -0500
+Subject: btrfs: fix use of uninit "ret" in end_extent_writepage()
+
+From: Eric Sandeen <sandeen@redhat.com>
+
+commit 3e2426bd0eb980648449e7a2f5a23e3cd3c7725c upstream.
+
+If this condition in end_extent_writepage() is false:
+
+ if (tree->ops && tree->ops->writepage_end_io_hook)
+
+we will then test an uninitialized "ret" at:
+
+ ret = ret < 0 ? ret : -EIO;
+
+The test for ret is for the case where ->writepage_end_io_hook
+failed, and we'd choose that ret as the error; but if
+there is no ->writepage_end_io_hook, nothing sets ret.
+
+Initializing ret to 0 should be sufficient; if
+writepage_end_io_hook wasn't set, (!uptodate) means
+non-zero err was passed in, so we choose -EIO in that case.
+
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/extent_io.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2354,7 +2354,7 @@ int end_extent_writepage(struct page *pa
+ {
+ int uptodate = (err == 0);
+ struct extent_io_tree *tree;
+- int ret;
++ int ret = 0;
+
+ tree = &BTRFS_I(page->mapping->host)->io_tree;
+
--- /dev/null
+From de348ee022175401e77d7662b7ca6e231a94e3fd Mon Sep 17 00:00:00 2001
+From: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+Date: Wed, 9 Apr 2014 19:23:22 +0800
+Subject: Btrfs: make sure there are not any read requests before stopping workers
+
+From: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+
+commit de348ee022175401e77d7662b7ca6e231a94e3fd upstream.
+
+In close_ctree(), after we have stopped all workers, there may still be
+some read requests (for example readahead) to submit, and this *may* trigger
+an oops that a user reported before:
+
+kernel BUG at fs/btrfs/async-thread.c:619!
+
+By hacking the code, I can reproduce this problem with only one CPU available.
+We fix this potential problem by invalidating all btree inode pages before
+stopping all workers.
+
+Thanks to Miao for pointing out this problem.
+
+Signed-off-by: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+Reviewed-by: David Sterba <dsterba@suse.cz>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/disk-io.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3633,6 +3633,11 @@ int close_ctree(struct btrfs_root *root)
+
+ btrfs_free_block_groups(fs_info);
+
++ /*
++ * we must make sure there is not any read request to
++ * submit after we stopping all workers.
++ */
++ invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
+ btrfs_stop_all_workers(fs_info);
+
+ free_root_pointers(fs_info, 1);
--- /dev/null
+From 5dca6eea91653e9949ce6eb9e9acab6277e2f2c4 Mon Sep 17 00:00:00 2001
+From: Liu Bo <bo.li.liu@oracle.com>
+Date: Mon, 12 May 2014 12:47:36 +0800
+Subject: Btrfs: mark mapping with error flag to report errors to userspace
+
+From: Liu Bo <bo.li.liu@oracle.com>
+
+commit 5dca6eea91653e9949ce6eb9e9acab6277e2f2c4 upstream.
+
+According to commit 865ffef3797da2cac85b3354b5b6050dc9660978
+(fs: fix fsync() error reporting),
+it's not stable to just check error pages because pages can be
+truncated or invalidated, we should also mark mapping with error
+flag so that a later fsync can catch the error.
+
+Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/extent_io.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2368,6 +2368,8 @@ int end_extent_writepage(struct page *pa
+ if (!uptodate) {
+ ClearPageUptodate(page);
+ SetPageError(page);
++ ret = ret < 0 ? ret : -EIO;
++ mapping_set_error(page->mapping, ret);
+ }
+ return 0;
+ }
--- /dev/null
+From 32d6b47fe6fc1714d5f1bba1b9f38e0ab0ad58a8 Mon Sep 17 00:00:00 2001
+From: Miao Xie <miaox@cn.fujitsu.com>
+Date: Thu, 24 Apr 2014 13:31:55 +0800
+Subject: Btrfs: output warning instead of error when loading free space cache failed
+
+From: Miao Xie <miaox@cn.fujitsu.com>
+
+commit 32d6b47fe6fc1714d5f1bba1b9f38e0ab0ad58a8 upstream.
+
+If we fail to load a free space cache, we can rebuild it from the extent tree,
+so it is not a serious error; we should not output an error message that
+would make the users uncomfortable. This patch uses a warning message
+instead.
+
+Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/free-space-cache.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -831,7 +831,7 @@ int load_free_space_cache(struct btrfs_f
+
+ if (!matched) {
+ __btrfs_remove_free_space_cache(ctl);
+- btrfs_err(fs_info, "block group %llu has wrong amount of free space",
++ btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
+ block_group->key.objectid);
+ ret = -1;
+ }
+@@ -843,7 +843,7 @@ out:
+ spin_unlock(&block_group->lock);
+ ret = 0;
+
+- btrfs_err(fs_info, "failed to load free space cache for block group %llu",
++ btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now",
+ block_group->key.objectid);
+ }
+
--- /dev/null
+From a1a50f60a6bf4f861eb94793420274bc1ccd409a Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@gmail.com>
+Date: Sat, 26 Apr 2014 01:35:31 +0100
+Subject: Btrfs: read inode size after acquiring the mutex when punching a hole
+
+From: Filipe Manana <fdmanana@gmail.com>
+
+commit a1a50f60a6bf4f861eb94793420274bc1ccd409a upstream.
+
+In a previous change, commit 12870f1c9b2de7d475d22e73fd7db1b418599725,
+I accidentally moved the roundup of inode->i_size to outside of the
+critical section delimited by the inode mutex, which is not atomic and
+not correct since the size can be changed by other task before we acquire
+the mutex. Therefore fix it.
+
+Signed-off-by: Filipe David Borba Manana <fdmanana@gmail.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/file.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2200,13 +2200,14 @@ static int btrfs_punch_hole(struct inode
+ bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
+ ((offset + len - 1) >> PAGE_CACHE_SHIFT));
+ bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
+- u64 ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE);
++ u64 ino_size;
+
+ ret = btrfs_wait_ordered_range(inode, offset, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&inode->i_mutex);
++ ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE);
+ /*
+ * We needn't truncate any page which is beyond the end of the file
+ * because we are sure there is no data there.
--- /dev/null
+From c992ec94f24c3e7135d6c23860615f269f0b1d87 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@gmail.com>
+Date: Sat, 22 Mar 2014 17:15:24 +0000
+Subject: Btrfs: send, account for orphan directories when building path strings
+
+From: Filipe Manana <fdmanana@gmail.com>
+
+commit c992ec94f24c3e7135d6c23860615f269f0b1d87 upstream.
+
+If we have directories with a pending move/rename operation, we must take into
+account any orphan directories that got created before executing the pending
+move/rename. Those orphan directories are directories with an inode number higher
+than the current send progress and that don't exist in the parent snapshot, they
+are created before current progress reaches their inode number, with a generated
+name of the form oN-M-I and at the root of the filesystem tree, and later when
+progress matches their inode number, moved/renamed to their final location.
+
+Reproducer:
+
+ $ mkfs.btrfs -f /dev/sdd
+ $ mount /dev/sdd /mnt
+
+ $ mkdir -p /mnt/a/b/c/d
+ $ mkdir /mnt/a/b/e
+ $ mv /mnt/a/b/c /mnt/a/b/e/CC
+ $ mkdir /mnt/a/b/e/CC/d/f
+ $ mkdir /mnt/a/g
+
+ $ btrfs subvolume snapshot -r /mnt /mnt/snap1
+ $ btrfs send /mnt/snap1 -f /tmp/base.send
+
+ $ mkdir /mnt/a/g/h
+ $ mv /mnt/a/b/e /mnt/a/g/h/EE
+ $ mv /mnt/a/g/h/EE/CC/d /mnt/a/g/h/EE/DD
+
+ $ btrfs subvolume snapshot -r /mnt /mnt/snap2
+ $ btrfs send -p /mnt/snap1 /mnt/snap2 -f /tmp/incremental.send
+
+The second receive command failed with the following error:
+
+ ERROR: rename a/b/e/CC/d -> o264-7-0/EE/DD failed. No such file or directory
+
+A test case for xfstests follows soon.
+
+Signed-off-by: Filipe David Borba Manana <fdmanana@gmail.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/send.c | 33 +++++++++------------------------
+ 1 file changed, 9 insertions(+), 24 deletions(-)
+
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -3054,33 +3054,18 @@ static int apply_dir_move(struct send_ct
+ if (ret < 0)
+ goto out;
+
+- if (parent_ino == sctx->cur_ino) {
+- /* child only renamed, not moved */
+- ASSERT(parent_gen == sctx->cur_inode_gen);
+- ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
+- from_path);
+- if (ret < 0)
+- goto out;
+- ret = fs_path_add_path(from_path, name);
+- if (ret < 0)
+- goto out;
+- } else {
+- /* child moved and maybe renamed too */
+- sctx->send_progress = pm->ino;
+- ret = get_cur_path(sctx, pm->ino, pm->gen, from_path);
+- if (ret < 0)
+- goto out;
+- }
++ ret = get_cur_path(sctx, parent_ino, parent_gen,
++ from_path);
++ if (ret < 0)
++ goto out;
++ ret = fs_path_add_path(from_path, name);
++ if (ret < 0)
++ goto out;
+
+- fs_path_free(name);
++ fs_path_reset(name);
++ to_path = name;
+ name = NULL;
+
+- to_path = fs_path_alloc();
+- if (!to_path) {
+- ret = -ENOMEM;
+- goto out;
+- }
+-
+ sctx->send_progress = sctx->cur_ino + 1;
+ ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
+ if (ret < 0)
--- /dev/null
+From 1af56070e3ef9477dbc7eba3b9ad7446979c7974 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@gmail.com>
+Date: Sun, 25 May 2014 04:49:24 +0100
+Subject: Btrfs: send, don't error in the presence of subvols/snapshots
+
+From: Filipe Manana <fdmanana@gmail.com>
+
+commit 1af56070e3ef9477dbc7eba3b9ad7446979c7974 upstream.
+
+If we are doing an incremental send and the base snapshot has a
+directory with name X that doesn't exist anymore in the second
+snapshot and a new subvolume/snapshot exists in the second snapshot
+that has the same name as the directory (name X), the incremental
+send would fail with -ENOENT error. This is because it attempts
+to lookup for an inode with a number matching the objectid of a
+root, which doesn't exist.
+
+Steps to reproduce:
+
+ mkfs.btrfs -f /dev/sdd
+ mount /dev/sdd /mnt
+
+ mkdir /mnt/testdir
+ btrfs subvolume snapshot -r /mnt /mnt/mysnap1
+
+ rmdir /mnt/testdir
+ btrfs subvolume create /mnt/testdir
+ btrfs subvolume snapshot -r /mnt /mnt/mysnap2
+
+ btrfs send -p /mnt/mysnap1 /mnt/mysnap2 -f /tmp/send.data
+
+A test case for xfstests follows.
+
+Reported-by: Robert White <rwhite@pobox.com>
+Signed-off-by: Filipe David Borba Manana <fdmanana@gmail.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/send.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -1628,6 +1628,10 @@ static int lookup_dir_item_inode(struct
+ goto out;
+ }
+ btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
++ if (key.type == BTRFS_ROOT_ITEM_KEY) {
++ ret = -ENOENT;
++ goto out;
++ }
+ *found_inode = key.objectid;
+ *found_type = btrfs_dir_type(path->nodes[0], di);
+
--- /dev/null
+From 7e3ae33efad1490d01040f552ef50e58ed6376ca Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@gmail.com>
+Date: Fri, 23 May 2014 20:15:16 +0100
+Subject: Btrfs: send, use the right limits for xattr names and values
+
+From: Filipe Manana <fdmanana@gmail.com>
+
+commit 7e3ae33efad1490d01040f552ef50e58ed6376ca upstream.
+
+We were limiting the sum of the xattr name and value lengths to PATH_MAX,
+which is not correct, specially on filesystems created with btrfs-progs
+v3.12 or higher, where the default leaf size is max(16384, PAGE_SIZE), or
+systems with page sizes larger than 4096 bytes.
+
+Xattrs have their own specific maximum name and value lengths, which depend
+on the leaf size, therefore use these limits to be able to send xattrs with
+sizes larger than PATH_MAX.
+
+A test case for xfstests follows.
+
+Signed-off-by: Filipe David Borba Manana <fdmanana@gmail.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/send.c | 30 +++++++++++++++++++++++-------
+ 1 file changed, 23 insertions(+), 7 deletions(-)
+
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -975,7 +975,7 @@ static int iterate_dir_item(struct btrfs
+ struct btrfs_dir_item *di;
+ struct btrfs_key di_key;
+ char *buf = NULL;
+- const int buf_len = PATH_MAX;
++ int buf_len;
+ u32 name_len;
+ u32 data_len;
+ u32 cur;
+@@ -985,6 +985,11 @@ static int iterate_dir_item(struct btrfs
+ int num;
+ u8 type;
+
++ if (found_key->type == BTRFS_XATTR_ITEM_KEY)
++ buf_len = BTRFS_MAX_XATTR_SIZE(root);
++ else
++ buf_len = PATH_MAX;
++
+ buf = kmalloc(buf_len, GFP_NOFS);
+ if (!buf) {
+ ret = -ENOMEM;
+@@ -1006,12 +1011,23 @@ static int iterate_dir_item(struct btrfs
+ type = btrfs_dir_type(eb, di);
+ btrfs_dir_item_key_to_cpu(eb, di, &di_key);
+
+- /*
+- * Path too long
+- */
+- if (name_len + data_len > buf_len) {
+- ret = -ENAMETOOLONG;
+- goto out;
++ if (type == BTRFS_FT_XATTR) {
++ if (name_len > XATTR_NAME_MAX) {
++ ret = -ENAMETOOLONG;
++ goto out;
++ }
++ if (name_len + data_len > buf_len) {
++ ret = -E2BIG;
++ goto out;
++ }
++ } else {
++ /*
++ * Path too long
++ */
++ if (name_len + data_len > buf_len) {
++ ret = -ENAMETOOLONG;
++ goto out;
++ }
+ }
+
+ read_extent_buffer(eb, buf, (unsigned long)(di + 1),
--- /dev/null
+From 298658414a2f0bea1f05a81876a45c1cd96aa2e0 Mon Sep 17 00:00:00 2001
+From: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+Date: Tue, 13 May 2014 17:05:06 +0800
+Subject: Btrfs: set right total device count for seeding support
+
+From: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+
+commit 298658414a2f0bea1f05a81876a45c1cd96aa2e0 upstream.
+
+Seeding device support allows us to create a new filesystem
+based on an existing filesystem.
+
+However the newly created filesystem's @total_devices should include seed
+devices. This patch fixes the following problem:
+
+ # mkfs.btrfs -f /dev/sdb
+ # btrfstune -S 1 /dev/sdb
+ # mount /dev/sdb /mnt
+ # btrfs device add -f /dev/sdc /mnt --->fs_devices->total_devices = 1
+ # umount /mnt
+ # mount /dev/sdc /mnt --->fs_devices->total_devices = 2
+
+This is because we record the right @total_devices in the superblock, but
+@fs_devices->total_devices is reset to be 0 in btrfs_prepare_sprout().
+
+Fix this problem by not resetting @fs_devices->total_devices.
+
+Signed-off-by: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/volumes.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1903,7 +1903,6 @@ static int btrfs_prepare_sprout(struct b
+ fs_devices->seeding = 0;
+ fs_devices->num_devices = 0;
+ fs_devices->open_devices = 0;
+- fs_devices->total_devices = 0;
+ fs_devices->seed = seed_devices;
+
+ generate_random_uuid(fs_devices->fsid);
--- /dev/null
+From cd857dd6bc2ae9ecea14e75a34e8a8fdc158e307 Mon Sep 17 00:00:00 2001
+From: Liu Bo <bo.li.liu@oracle.com>
+Date: Sun, 8 Jun 2014 19:04:13 +0800
+Subject: Btrfs: use right type to get real comparison
+
+From: Liu Bo <bo.li.liu@oracle.com>
+
+commit cd857dd6bc2ae9ecea14e75a34e8a8fdc158e307 upstream.
+
+We want to make sure the point is still within the extent item, not to verify
+the memory it's pointing to.
+
+Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/backref.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1424,7 +1424,7 @@ static int __get_extent_inline_ref(unsig
+ *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
+ }
+ *ptr = (unsigned long)*out_eiref;
+- if ((void *)*ptr >= (void *)ei + item_size)
++ if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
+ return -ENOENT;
+ }
+
--- /dev/null
+From 663a962151593c69374776e8651238d0da072459 Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <pshilovsky@samba.org>
+Date: Sat, 24 May 2014 16:42:02 +0400
+Subject: CIFS: Fix memory leaks in SMB2_open
+
+From: Pavel Shilovsky <pshilovsky@samba.org>
+
+commit 663a962151593c69374776e8651238d0da072459 upstream.
+
+Signed-off-by: Pavel Shilovsky <pshilovsky@samba.org>
+Reviewed-by: Shirish Pargaonkar <spargaonkar@suse.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2pdu.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1089,6 +1089,7 @@ SMB2_open(const unsigned int xid, struct
+ int rc = 0;
+ unsigned int num_iovecs = 2;
+ __u32 file_attributes = 0;
++ char *dhc_buf = NULL, *lc_buf = NULL;
+
+ cifs_dbg(FYI, "create/open\n");
+
+@@ -1155,6 +1156,7 @@ SMB2_open(const unsigned int xid, struct
+ kfree(copy_path);
+ return rc;
+ }
++ lc_buf = iov[num_iovecs-1].iov_base;
+ }
+
+ if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
+@@ -1169,9 +1171,10 @@ SMB2_open(const unsigned int xid, struct
+ if (rc) {
+ cifs_small_buf_release(req);
+ kfree(copy_path);
+- kfree(iov[num_iovecs-1].iov_base);
++ kfree(lc_buf);
+ return rc;
+ }
++ dhc_buf = iov[num_iovecs-1].iov_base;
+ }
+
+ rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
+@@ -1203,6 +1206,8 @@ SMB2_open(const unsigned int xid, struct
+ *oplock = rsp->OplockLevel;
+ creat_exit:
+ kfree(copy_path);
++ kfree(lc_buf);
++ kfree(dhc_buf);
+ free_rsp_buf(resp_buftype, rsp);
+ return rc;
+ }
--- /dev/null
+From 2aea39eca6b68d6ae7eb545332df0695f56a3d3f Mon Sep 17 00:00:00 2001
+From: Jaegeuk Kim <jaegeuk.kim@samsung.com>
+Date: Thu, 24 Apr 2014 09:49:52 +0900
+Subject: f2fs: submit bio at the reclaim path
+
+From: Jaegeuk Kim <jaegeuk.kim@samsung.com>
+
+commit 2aea39eca6b68d6ae7eb545332df0695f56a3d3f upstream.
+
+If f2fs_write_data_page is called through the reclaim path, we should submit
+the bio right away.
+
+This patch resolves the following issue that Marc Dietrich reported.
+"It took me a while to bisect a problem which causes my ARM (tegra2) netbook to
+frequently stall for 5-10 seconds when I enable EXA acceleration (opentegra
+experimental ddx)."
+And this patch fixes that.
+
+Reported-by: Marc Dietrich <marvin24@gmx.de>
+Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/f2fs/data.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -835,6 +835,8 @@ out:
+ unlock_page(page);
+ if (need_balance_fs)
+ f2fs_balance_fs(sbi);
++ if (wbc->for_reclaim)
++ f2fs_submit_merged_bio(sbi, DATA, WRITE);
+ return 0;
+
+ redirty_out:
--- /dev/null
+From 8321cf2596d283821acc466377c2b85bcd3422b7 Mon Sep 17 00:00:00 2001
+From: Rickard Strandqvist <rickard_strandqvist@spectrumdigital.se>
+Date: Thu, 22 May 2014 22:43:43 +0200
+Subject: fs: btrfs: volumes.c: Fix for possible null pointer dereference
+
+From: Rickard Strandqvist <rickard_strandqvist@spectrumdigital.se>
+
+commit 8321cf2596d283821acc466377c2b85bcd3422b7 upstream.
+
+There is otherwise a risk of a possible null pointer dereference.
+
+Was largely found by using a static code analysis program called cppcheck.
+
+Signed-off-by: Rickard Strandqvist <rickard_strandqvist@spectrumdigital.se>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/volumes.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1690,11 +1690,12 @@ int btrfs_rm_device(struct btrfs_root *r
+ struct btrfs_fs_devices *fs_devices;
+ fs_devices = root->fs_info->fs_devices;
+ while (fs_devices) {
+- if (fs_devices->seed == cur_devices)
++ if (fs_devices->seed == cur_devices) {
++ fs_devices->seed = cur_devices->seed;
+ break;
++ }
+ fs_devices = fs_devices->seed;
+ }
+- fs_devices->seed = cur_devices->seed;
+ cur_devices->seed = NULL;
+ lock_chunks(root);
+ __btrfs_close_devices(cur_devices);
--- /dev/null
+From 1e77d0a1ed7417d2a5a52a7b8d32aea1833faa6c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 7 Mar 2013 14:53:45 +0100
+Subject: genirq: Sanitize spurious interrupt detection of threaded irqs
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 1e77d0a1ed7417d2a5a52a7b8d32aea1833faa6c upstream.
+
+Till reported that the spurious interrupt detection of threaded
+interrupts is broken in two ways:
+
+- note_interrupt() is called for each action thread of a shared
+ interrupt line. That's wrong as we are only interested whether none
+ of the device drivers felt responsible for the interrupt, but by
+ calling multiple times for a single interrupt line we account
+ IRQ_NONE even if one of the drivers felt responsible.
+
+- note_interrupt() when called from the thread handler is not
+ serialized. That leaves the members of irq_desc which are used for
+ the spurious detection unprotected.
+
+To solve this we need to defer the spurious detection of a threaded
+interrupt to the next hardware interrupt context where we have
+implicit serialization.
+
+If note_interrupt is called with action_ret == IRQ_WAKE_THREAD, we
+check whether the previous interrupt requested a deferred check. If
+not, we request a deferred check for the next hardware interrupt and
+return.
+
+If set, we check whether one of the interrupt threads signaled
+success. Depending on this information we feed the result into the
+spurious detector.
+
+If one primary handler of a shared interrupt returns IRQ_HANDLED we
+disable the deferred check of irq threads on the same line, as we have
+found at least one device driver who cared.
+
+Reported-by: Till Straumann <strauman@slac.stanford.edu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Austin Schuh <austin@peloton-tech.com>
+Cc: Oliver Hartkopp <socketcan@hartkopp.net>
+Cc: Wolfgang Grandegger <wg@grandegger.com>
+Cc: Pavel Pisa <pisa@cmp.felk.cvut.cz>
+Cc: Marc Kleine-Budde <mkl@pengutronix.de>
+Cc: linux-can@vger.kernel.org
+Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1303071450130.22263@ionos
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/irqdesc.h | 4 +
+ kernel/irq/manage.c | 4 -
+ kernel/irq/spurious.c | 106 ++++++++++++++++++++++++++++++++++++++++++++++--
+ 3 files changed, 108 insertions(+), 6 deletions(-)
+
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -27,6 +27,8 @@ struct irq_desc;
+ * @irq_count: stats field to detect stalled irqs
+ * @last_unhandled: aging timer for unhandled count
+ * @irqs_unhandled: stats field for spurious unhandled interrupts
++ * @threads_handled: stats field for deferred spurious detection of threaded handlers
++ * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers
+ * @lock: locking for SMP
+ * @affinity_hint: hint to user space for preferred irq affinity
+ * @affinity_notify: context for notification of affinity changes
+@@ -52,6 +54,8 @@ struct irq_desc {
+ unsigned int irq_count; /* For detecting broken IRQs */
+ unsigned long last_unhandled; /* Aging timer for unhandled count */
+ unsigned int irqs_unhandled;
++ atomic_t threads_handled;
++ int threads_handled_last;
+ raw_spinlock_t lock;
+ struct cpumask *percpu_enabled;
+ #ifdef CONFIG_SMP
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -886,8 +886,8 @@ static int irq_thread(void *data)
+ irq_thread_check_affinity(desc, action);
+
+ action_ret = handler_fn(desc, action);
+- if (!noirqdebug)
+- note_interrupt(action->irq, desc, action_ret);
++ if (action_ret == IRQ_HANDLED)
++ atomic_inc(&desc->threads_handled);
+
+ wake_threads_waitq(desc);
+ }
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -270,6 +270,8 @@ try_misrouted_irq(unsigned int irq, stru
+ return action && (action->flags & IRQF_IRQPOLL);
+ }
+
++#define SPURIOUS_DEFERRED 0x80000000
++
+ void note_interrupt(unsigned int irq, struct irq_desc *desc,
+ irqreturn_t action_ret)
+ {
+@@ -277,15 +279,111 @@ void note_interrupt(unsigned int irq, st
+ irq_settings_is_polled(desc))
+ return;
+
+- /* we get here again via the threaded handler */
+- if (action_ret == IRQ_WAKE_THREAD)
+- return;
+-
+ if (bad_action_ret(action_ret)) {
+ report_bad_irq(irq, desc, action_ret);
+ return;
+ }
+
++ /*
++ * We cannot call note_interrupt from the threaded handler
++ * because we need to look at the compound of all handlers
++ * (primary and threaded). Aside of that in the threaded
++ * shared case we have no serialization against an incoming
++ * hardware interrupt while we are dealing with a threaded
++ * result.
++ *
++ * So in case a thread is woken, we just note the fact and
++ * defer the analysis to the next hardware interrupt.
++ *
++ * The threaded handlers store whether they sucessfully
++ * handled an interrupt and we check whether that number
++ * changed versus the last invocation.
++ *
++ * We could handle all interrupts with the delayed by one
++ * mechanism, but for the non forced threaded case we'd just
++ * add pointless overhead to the straight hardirq interrupts
++ * for the sake of a few lines less code.
++ */
++ if (action_ret & IRQ_WAKE_THREAD) {
++ /*
++ * There is a thread woken. Check whether one of the
++ * shared primary handlers returned IRQ_HANDLED. If
++ * not we defer the spurious detection to the next
++ * interrupt.
++ */
++ if (action_ret == IRQ_WAKE_THREAD) {
++ int handled;
++ /*
++ * We use bit 31 of thread_handled_last to
++ * denote the deferred spurious detection
++ * active. No locking necessary as
++ * thread_handled_last is only accessed here
++ * and we have the guarantee that hard
++ * interrupts are not reentrant.
++ */
++ if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
++ desc->threads_handled_last |= SPURIOUS_DEFERRED;
++ return;
++ }
++ /*
++ * Check whether one of the threaded handlers
++ * returned IRQ_HANDLED since the last
++ * interrupt happened.
++ *
++ * For simplicity we just set bit 31, as it is
++ * set in threads_handled_last as well. So we
++ * avoid extra masking. And we really do not
++ * care about the high bits of the handled
++ * count. We just care about the count being
++ * different than the one we saw before.
++ */
++ handled = atomic_read(&desc->threads_handled);
++ handled |= SPURIOUS_DEFERRED;
++ if (handled != desc->threads_handled_last) {
++ action_ret = IRQ_HANDLED;
++ /*
++ * Note: We keep the SPURIOUS_DEFERRED
++ * bit set. We are handling the
++ * previous invocation right now.
++ * Keep it for the current one, so the
++ * next hardware interrupt will
++ * account for it.
++ */
++ desc->threads_handled_last = handled;
++ } else {
++ /*
++ * None of the threaded handlers felt
++ * responsible for the last interrupt
++ *
++ * We keep the SPURIOUS_DEFERRED bit
++ * set in threads_handled_last as we
++ * need to account for the current
++ * interrupt as well.
++ */
++ action_ret = IRQ_NONE;
++ }
++ } else {
++ /*
++ * One of the primary handlers returned
++ * IRQ_HANDLED. So we don't care about the
++ * threaded handlers on the same line. Clear
++ * the deferred detection bit.
++ *
++ * In theory we could/should check whether the
++ * deferred bit is set and take the result of
++ * the previous run into account here as
++ * well. But it's really not worth the
++ * trouble. If every other interrupt is
++ * handled we never trigger the spurious
++ * detector. And if this is just the one out
++ * of 100k unhandled ones which is handled
++ * then we merily delay the spurious detection
++ * by one hard interrupt. Not a real problem.
++ */
++ desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
++ }
++ }
++
+ if (unlikely(action_ret == IRQ_NONE)) {
+ /*
+ * If we are seeing only the odd spurious IRQ caused by
--- /dev/null
+From 0690a229c69f40a6c9c459ab455c85df49822525 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Sat, 7 Jun 2014 11:31:25 -0400
+Subject: Revert "drm/radeon: use variable UVD clocks"
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 0690a229c69f40a6c9c459ab455c85df49822525 upstream.
+
+This caused reduced performance for some users with advanced post
+processing enabled. We need a better method to pick the
+UVD state based on the amount of post processing required or tune
+the advanced post processing to fit within the lower power state
+envelope.
+
+This reverts commit 14a9579ddbf15dd1992a9481a4ec80b0b91656d5.
+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/radeon_pm.c | 3 +++
+ drivers/gpu/drm/radeon/radeon_uvd.c | 3 ++-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -984,6 +984,8 @@ void radeon_dpm_enable_uvd(struct radeon
+ if (enable) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.dpm.uvd_active = true;
++ /* disable this for now */
++#if 0
+ if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
+ else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
+@@ -993,6 +995,7 @@ void radeon_dpm_enable_uvd(struct radeon
+ else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
+ else
++#endif
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
+ rdev->pm.dpm.state = dpm_state;
+ mutex_unlock(&rdev->pm.mutex);
+--- a/drivers/gpu/drm/radeon/radeon_uvd.c
++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
+@@ -812,7 +812,8 @@ void radeon_uvd_note_usage(struct radeon
+ (rdev->pm.dpm.hd != hd)) {
+ rdev->pm.dpm.sd = sd;
+ rdev->pm.dpm.hd = hd;
+- streams_changed = true;
++ /* disable this for now */
++ /*streams_changed = true;*/
+ }
+ }
+
--- /dev/null
+From 68986c9f0f4552c34c248501eb0c690553866d6e Mon Sep 17 00:00:00 2001
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Date: Mon, 16 Jun 2014 19:40:20 +1000
+Subject: Revert "offb: Add palette hack for little endian"
+
+From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+
+commit 68986c9f0f4552c34c248501eb0c690553866d6e upstream.
+
+This reverts commit e1edf18b20076da83dd231dbd2146cbbc31c0b14.
+
+This patch was a misguided attempt at fixing offb for LE ppc64
+kernels on BE qemu but is just wrong ... it breaks real LE/LE
+setups, LE with real HW, and existing mixed endian systems
+that did the right thing with the appropriate device-tree
+property. Bad reviewing on my part, sorry.
+
+The right fix is to either make qemu change its endian when
+the guest changes endian (working on that) or to use the
+existing foreign endian support.
+
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/video/fbdev/offb.c | 11 +----------
+ 1 file changed, 1 insertion(+), 10 deletions(-)
+
+--- a/drivers/video/fbdev/offb.c
++++ b/drivers/video/fbdev/offb.c
+@@ -91,15 +91,6 @@ extern boot_infos_t *boot_infos;
+ #define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4
+ #define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8
+
+-#define FB_RIGHT_POS(p, bpp) (fb_be_math(p) ? 0 : (32 - (bpp)))
+-
+-static inline u32 offb_cmap_byteswap(struct fb_info *info, u32 value)
+-{
+- u32 bpp = info->var.bits_per_pixel;
+-
+- return cpu_to_be32(value) >> FB_RIGHT_POS(info, bpp);
+-}
+-
+ /*
+ * Set a single color register. The values supplied are already
+ * rounded down to the hardware's capabilities (according to the
+@@ -129,7 +120,7 @@ static int offb_setcolreg(u_int regno, u
+ mask <<= info->var.transp.offset;
+ value |= mask;
+ }
+- pal[regno] = offb_cmap_byteswap(info, value);
++ pal[regno] = value;
+ return 0;
+ }
+
scsi_cmnd-introduce-scsi_transfer_length-helper.patch
libiscsi-iser-adjust-data_length-to-include-protection-information.patch
target-sbc-loopback-adjust-command-data-length-in-case-pi-exists-on-the-wire.patch
+f2fs-submit-bio-at-the-reclaim-path.patch
+arm-mvebu-dt-fix-openblocks-ax3-4-ram-size.patch
+arm64-uid16-fix-__kernel_old_-gid-uid-_t-definitions.patch
+arm64-dma-removing-arch_has_dma_get_required_mask-macro.patch
+arm64-mm-remove-broken-operator-from-pmd_mknotpresent.patch
+x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch
+x86-x32-use-compat-shims-for-io_-setup-submit.patch
+revert-drm-radeon-use-variable-uvd-clocks.patch
+revert-offb-add-palette-hack-for-little-endian.patch
+genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch
+aio-fix-aio-request-leak-when-events-are-reaped-by-userspace.patch
+aio-fix-kernel-memory-disclosure-in-io_getevents-introduced-in-v3.10.patch
+cifs-fix-memory-leaks-in-smb2_open.patch
+btrfs-fix-leaf-corruption-caused-by-enospc-while-hole-punching.patch
+btrfs-fix-double-free-in-find_lock_delalloc_range.patch
+btrfs-read-inode-size-after-acquiring-the-mutex-when-punching-a-hole.patch
+btrfs-add-ctime-mtime-update-for-btrfs-device-add-remove.patch
+btrfs-output-warning-instead-of-error-when-loading-free-space-cache-failed.patch
+btrfs-send-account-for-orphan-directories-when-building-path-strings.patch
+btrfs-make-sure-there-are-not-any-read-requests-before-stopping-workers.patch
+btrfs-fix-null-pointer-crash-of-deleting-a-seed-device.patch
+btrfs-mark-mapping-with-error-flag-to-report-errors-to-userspace.patch
+btrfs-set-right-total-device-count-for-seeding-support.patch
+btrfs-send-don-t-error-in-the-presence-of-subvols-snapshots.patch
+btrfs-send-use-the-right-limits-for-xattr-names-and-values.patch
+btrfs-allocate-raid-type-kobjects-dynamically.patch
+fs-btrfs-volumes.c-fix-for-possible-null-pointer-dereference.patch
+btrfs-don-t-check-nodes-for-extent-items.patch
+btrfs-use-right-type-to-get-real-comparison.patch
+btrfs-fix-scrub_print_warning-to-handle-skinny-metadata-extents.patch
+btrfs-fix-use-of-uninit-ret-in-end_extent_writepage.patch
--- /dev/null
+From 246f2d2ee1d715e1077fc47d61c394569c8ee692 Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@linux.intel.com>
+Date: Wed, 30 Apr 2014 14:03:25 -0700
+Subject: x86-32, espfix: Remove filter for espfix32 due to race
+
+From: "H. Peter Anvin" <hpa@linux.intel.com>
+
+commit 246f2d2ee1d715e1077fc47d61c394569c8ee692 upstream.
+
+It is not safe to use LAR to filter when to go down the espfix path,
+because the LDT is per-process (rather than per-thread) and another
+thread might change the descriptors behind our back. Fortunately it
+is always *safe* (if a bit slow) to go down the espfix path, and a
+32-bit LDT stack segment is extremely rare.
+
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Link: http://lkml.kernel.org/r/1398816946-3351-1-git-send-email-hpa@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/entry_32.S | 5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -551,11 +551,6 @@ ENTRY(iret_exc)
+
+ CFI_RESTORE_STATE
+ ldt_ss:
+- larl PT_OLDSS(%esp), %eax
+- jnz restore_nocheck
+- testl $0x00400000, %eax # returning to 32bit stack?
+- jnz restore_nocheck # allright, normal return
+-
+ #ifdef CONFIG_PARAVIRT
+ /*
+ * The kernel can't run on a non-flat stack if paravirt mode
--- /dev/null
+From 7fd44dacdd803c0bbf38bf478d51d280902bb0f1 Mon Sep 17 00:00:00 2001
+From: Mike Frysinger <vapier@gentoo.org>
+Date: Sun, 4 May 2014 20:43:15 -0400
+Subject: x86, x32: Use compat shims for io_{setup,submit}
+
+From: Mike Frysinger <vapier@gentoo.org>
+
+commit 7fd44dacdd803c0bbf38bf478d51d280902bb0f1 upstream.
+
+The io_setup takes a pointer to a context id of type aio_context_t.
+This in turn is typed to a __kernel_ulong_t. We could tweak the
+exported headers to define this as a 64bit quantity for specific
+ABIs, but since we already have a 32bit compat shim for the x86 ABI,
+let's just re-use that logic. The libaio package is also written to
+expect this as a pointer type, so a compat shim would simplify that.
+
+The io_submit func operates on an array of pointers to iocb structs.
+Padding out the array to be 64bit aligned is a huge pain, so convert
+it over to the existing compat shim too.
+
+We don't convert io_getevents to the compat func as its only purpose
+is to handle the timespec struct, and the x32 ABI uses 64bit times.
+
+With this change, the libaio package can now pass its testsuite when
+built for the x32 ABI.
+
+Signed-off-by: Mike Frysinger <vapier@gentoo.org>
+Link: http://lkml.kernel.org/r/1399250595-5005-1-git-send-email-vapier@gentoo.org
+Cc: H.J. Lu <hjl.tools@gmail.com>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/syscalls/syscall_64.tbl | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/syscalls/syscall_64.tbl
++++ b/arch/x86/syscalls/syscall_64.tbl
+@@ -212,10 +212,10 @@
+ 203 common sched_setaffinity sys_sched_setaffinity
+ 204 common sched_getaffinity sys_sched_getaffinity
+ 205 64 set_thread_area
+-206 common io_setup sys_io_setup
++206 64 io_setup sys_io_setup
+ 207 common io_destroy sys_io_destroy
+ 208 common io_getevents sys_io_getevents
+-209 common io_submit sys_io_submit
++209 64 io_submit sys_io_submit
+ 210 common io_cancel sys_io_cancel
+ 211 64 get_thread_area
+ 212 common lookup_dcookie sys_lookup_dcookie
+@@ -359,3 +359,5 @@
+ 540 x32 process_vm_writev compat_sys_process_vm_writev
+ 541 x32 setsockopt compat_sys_setsockopt
+ 542 x32 getsockopt compat_sys_getsockopt
++543 x32 io_setup compat_sys_io_setup
++544 x32 io_submit compat_sys_io_submit