git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.14-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sat, 28 Jun 2014 01:12:24 +0000 (18:12 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sat, 28 Jun 2014 01:12:24 +0000 (18:12 -0700)
added patches:
aio-fix-aio-request-leak-when-events-are-reaped-by-userspace.patch
aio-fix-kernel-memory-disclosure-in-io_getevents-introduced-in-v3.10.patch
arm-mvebu-dt-fix-openblocks-ax3-4-ram-size.patch
arm64-dma-removing-arch_has_dma_get_required_mask-macro.patch
btrfs-add-ctime-mtime-update-for-btrfs-device-add-remove.patch
btrfs-allocate-raid-type-kobjects-dynamically.patch
btrfs-don-t-check-nodes-for-extent-items.patch
btrfs-fix-double-free-in-find_lock_delalloc_range.patch
btrfs-fix-lockdep-warning-with-reclaim-lock-inversion.patch
btrfs-fix-null-pointer-crash-of-deleting-a-seed-device.patch
btrfs-fix-scrub_print_warning-to-handle-skinny-metadata-extents.patch
btrfs-fix-use-of-uninit-ret-in-end_extent_writepage.patch
btrfs-make-sure-there-are-not-any-read-requests-before-stopping-workers.patch
btrfs-mark-mapping-with-error-flag-to-report-errors-to-userspace.patch
btrfs-output-warning-instead-of-error-when-loading-free-space-cache-failed.patch
btrfs-send-don-t-error-in-the-presence-of-subvols-snapshots.patch
btrfs-set-right-total-device-count-for-seeding-support.patch
btrfs-use-right-type-to-get-real-comparison.patch
cifs-fix-memory-leaks-in-smb2_open.patch
fs-btrfs-volumes.c-fix-for-possible-null-pointer-dereference.patch
genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch
x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch
x86-x32-use-compat-shims-for-io_-setup-submit.patch

24 files changed:
queue-3.14/aio-fix-aio-request-leak-when-events-are-reaped-by-userspace.patch [new file with mode: 0644]
queue-3.14/aio-fix-kernel-memory-disclosure-in-io_getevents-introduced-in-v3.10.patch [new file with mode: 0644]
queue-3.14/arm-mvebu-dt-fix-openblocks-ax3-4-ram-size.patch [new file with mode: 0644]
queue-3.14/arm64-dma-removing-arch_has_dma_get_required_mask-macro.patch [new file with mode: 0644]
queue-3.14/btrfs-add-ctime-mtime-update-for-btrfs-device-add-remove.patch [new file with mode: 0644]
queue-3.14/btrfs-allocate-raid-type-kobjects-dynamically.patch [new file with mode: 0644]
queue-3.14/btrfs-don-t-check-nodes-for-extent-items.patch [new file with mode: 0644]
queue-3.14/btrfs-fix-double-free-in-find_lock_delalloc_range.patch [new file with mode: 0644]
queue-3.14/btrfs-fix-lockdep-warning-with-reclaim-lock-inversion.patch [new file with mode: 0644]
queue-3.14/btrfs-fix-null-pointer-crash-of-deleting-a-seed-device.patch [new file with mode: 0644]
queue-3.14/btrfs-fix-scrub_print_warning-to-handle-skinny-metadata-extents.patch [new file with mode: 0644]
queue-3.14/btrfs-fix-use-of-uninit-ret-in-end_extent_writepage.patch [new file with mode: 0644]
queue-3.14/btrfs-make-sure-there-are-not-any-read-requests-before-stopping-workers.patch [new file with mode: 0644]
queue-3.14/btrfs-mark-mapping-with-error-flag-to-report-errors-to-userspace.patch [new file with mode: 0644]
queue-3.14/btrfs-output-warning-instead-of-error-when-loading-free-space-cache-failed.patch [new file with mode: 0644]
queue-3.14/btrfs-send-don-t-error-in-the-presence-of-subvols-snapshots.patch [new file with mode: 0644]
queue-3.14/btrfs-set-right-total-device-count-for-seeding-support.patch [new file with mode: 0644]
queue-3.14/btrfs-use-right-type-to-get-real-comparison.patch [new file with mode: 0644]
queue-3.14/cifs-fix-memory-leaks-in-smb2_open.patch [new file with mode: 0644]
queue-3.14/fs-btrfs-volumes.c-fix-for-possible-null-pointer-dereference.patch [new file with mode: 0644]
queue-3.14/genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch [new file with mode: 0644]
queue-3.14/series
queue-3.14/x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch [new file with mode: 0644]
queue-3.14/x86-x32-use-compat-shims-for-io_-setup-submit.patch [new file with mode: 0644]

diff --git a/queue-3.14/aio-fix-aio-request-leak-when-events-are-reaped-by-userspace.patch b/queue-3.14/aio-fix-aio-request-leak-when-events-are-reaped-by-userspace.patch
new file mode 100644 (file)
index 0000000..1266d21
--- /dev/null
@@ -0,0 +1,45 @@
+From f8567a3845ac05bb28f3c1b478ef752762bd39ef Mon Sep 17 00:00:00 2001
+From: Benjamin LaHaise <bcrl@kvack.org>
+Date: Tue, 24 Jun 2014 13:12:55 -0400
+Subject: aio: fix aio request leak when events are reaped by userspace
+
+From: Benjamin LaHaise <bcrl@kvack.org>
+
+commit f8567a3845ac05bb28f3c1b478ef752762bd39ef upstream.
+
+The aio cleanups and optimizations by kmo that were merged into the 3.10
+tree added a regression for userspace event reaping.  Specifically, the
+reference counts are not decremented if the event is reaped in userspace,
+leading to the application being unable to submit further aio requests.
+This patch applies to 3.12+.  A separate backport is required for 3.10/3.11.
+This issue was uncovered as part of CVE-2014-0206.
+
+Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
+Cc: Kent Overstreet <kmo@daterainc.com>
+Cc: Mateusz Guzik <mguzik@redhat.com>
+Cc: Petr Matousek <pmatouse@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1007,6 +1007,7 @@ void aio_complete(struct kiocb *iocb, lo
+       /* everything turned out well, dispose of the aiocb. */
+       kiocb_free(iocb);
++      put_reqs_available(ctx, 1);
+       /*
+        * We have to order our ring_info tail store above and test
+@@ -1086,8 +1087,6 @@ static long aio_read_events_ring(struct
+       flush_dcache_page(ctx->ring_pages[0]);
+       pr_debug("%li  h%u t%u\n", ret, head, tail);
+-
+-      put_reqs_available(ctx, ret);
+ out:
+       mutex_unlock(&ctx->ring_lock);
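Editor's aside, not part of the queued patch: the leak described above is essentially a slot-accounting bug, and the fix moves the "give the slot back" step from the event-read path to completion time. A minimal, hypothetical userspace model of that accounting (names are illustrative, not the kernel's):

    /* Hypothetical model: NR_SLOTS plays the role of the aio ring's
     * request capacity. */
    #include <stdio.h>

    #define NR_SLOTS 4

    static int slots_available = NR_SLOTS;

    static int submit(void)
    {
            if (slots_available == 0)
                    return -1;              /* the real code would return -EAGAIN */
            slots_available--;
            return 0;
    }

    static void complete(void)
    {
            slots_available++;              /* the fix: release the slot here ... */
    }

    int main(void)
    {
            int i;

            for (i = 0; i < 8; i++) {
                    if (submit())
                            printf("submit %d failed: no slots left\n", i);
                    else
                            complete();     /* ... even if userspace reaps the event itself */
            }
            return 0;
    }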
diff --git a/queue-3.14/aio-fix-kernel-memory-disclosure-in-io_getevents-introduced-in-v3.10.patch b/queue-3.14/aio-fix-kernel-memory-disclosure-in-io_getevents-introduced-in-v3.10.patch
new file mode 100644 (file)
index 0000000..b68c1d6
--- /dev/null
@@ -0,0 +1,42 @@
+From edfbbf388f293d70bf4b7c0bc38774d05e6f711a Mon Sep 17 00:00:00 2001
+From: Benjamin LaHaise <bcrl@kvack.org>
+Date: Tue, 24 Jun 2014 13:32:51 -0400
+Subject: aio: fix kernel memory disclosure in io_getevents() introduced in v3.10
+
+From: Benjamin LaHaise <bcrl@kvack.org>
+
+commit edfbbf388f293d70bf4b7c0bc38774d05e6f711a upstream.
+
+A kernel memory disclosure was introduced in aio_read_events_ring() in v3.10
+by commit a31ad380bed817aa25f8830ad23e1a0480fef797.  The changes made to
+aio_read_events_ring() failed to correctly limit the index into
+ctx->ring_pages[], allowing an attacker to cause the subsequent kmap() of
+an arbitrary page with a copy_to_user() to copy the contents into userspace.
+This vulnerability has been assigned CVE-2014-0206.  Thanks to Mateusz and
+Petr for disclosing this issue.
+
+This patch applies to v3.12+.  A separate backport is needed for 3.10/3.11.
+
+Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
+Cc: Mateusz Guzik <mguzik@redhat.com>
+Cc: Petr Matousek <pmatouse@redhat.com>
+Cc: Kent Overstreet <kmo@daterainc.com>
+Cc: Jeff Moyer <jmoyer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1049,6 +1049,9 @@ static long aio_read_events_ring(struct
+       if (head == tail)
+               goto out;
++      head %= ctx->nr_events;
++      tail %= ctx->nr_events;
++
+       while (ret < nr) {
+               long avail;
+               struct io_event *ev;
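Editor's aside, not part of the queued patch: the two added lines clamp the ring positions before they are used to index ctx->ring_pages[]. A hypothetical userspace sketch of the same bounds discipline:

    #include <assert.h>

    #define NR_EVENTS 128

    struct io_event_stub { long data; };

    static struct io_event_stub ring[NR_EVENTS];

    /* Reduce a ring position modulo the ring size before indexing, the
     * userspace equivalent of the head %= ctx->nr_events added above. */
    static struct io_event_stub *event_at(unsigned int pos)
    {
            return &ring[pos % NR_EVENTS];
    }

    int main(void)
    {
            assert(event_at(5) == &ring[5]);
            assert(event_at(NR_EVENTS + 5) == &ring[5]); /* wraps instead of walking off the array */
            return 0;
    }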
diff --git a/queue-3.14/arm-mvebu-dt-fix-openblocks-ax3-4-ram-size.patch b/queue-3.14/arm-mvebu-dt-fix-openblocks-ax3-4-ram-size.patch
new file mode 100644 (file)
index 0000000..c927480
--- /dev/null
@@ -0,0 +1,75 @@
+From e47043aea3853a74a9aa5726a1faa916d7462ab7 Mon Sep 17 00:00:00 2001
+From: Jason Cooper <jason@lakedaemon.net>
+Date: Wed, 4 Jun 2014 13:41:20 +0000
+Subject: ARM: mvebu: DT: fix OpenBlocks AX3-4 RAM size
+
+From: Jason Cooper <jason@lakedaemon.net>
+
+commit e47043aea3853a74a9aa5726a1faa916d7462ab7 upstream.
+
+The OpenBlocks AX3-4 has a non-DT bootloader.  It also comes with 1GB of
+soldered on RAM, and a DIMM slot for expansion.
+
+Unfortunately, atags_to_fdt() doesn't work in big-endian mode, so we see
+the following failure when attempting to boot a big-endian kernel:
+
+  686 slab pages
+  17 pages shared
+  0 pages swap cached
+  [ pid ]   uid  tgid total_vm      rss nr_ptes swapents oom_score_adj name
+  Kernel panic - not syncing: Out of memory and no killable processes...
+
+  CPU: 1 PID: 351 Comm: kworker/u4:0 Not tainted 3.15.0-rc8-next-20140603 #1
+  [<c0215a54>] (unwind_backtrace) from [<c021160c>] (show_stack+0x10/0x14)
+  [<c021160c>] (show_stack) from [<c0802500>] (dump_stack+0x78/0x94)
+  [<c0802500>] (dump_stack) from [<c0800068>] (panic+0x90/0x21c)
+  [<c0800068>] (panic) from [<c02b5704>] (out_of_memory+0x320/0x340)
+  [<c02b5704>] (out_of_memory) from [<c02b93a0>] (__alloc_pages_nodemask+0x874/0x930)
+  [<c02b93a0>] (__alloc_pages_nodemask) from [<c02d446c>] (handle_mm_fault+0x744/0x96c)
+  [<c02d446c>] (handle_mm_fault) from [<c02cf250>] (__get_user_pages+0xd0/0x4c0)
+  [<c02cf250>] (__get_user_pages) from [<c02f3598>] (get_arg_page+0x54/0xbc)
+  [<c02f3598>] (get_arg_page) from [<c02f3878>] (copy_strings+0x278/0x29c)
+  [<c02f3878>] (copy_strings) from [<c02f38bc>] (copy_strings_kernel+0x20/0x28)
+  [<c02f38bc>] (copy_strings_kernel) from [<c02f4f1c>] (do_execve+0x3a8/0x4c8)
+  [<c02f4f1c>] (do_execve) from [<c025ac10>] (____call_usermodehelper+0x15c/0x194)
+  [<c025ac10>] (____call_usermodehelper) from [<c020e9b8>] (ret_from_fork+0x14/0x3c)
+  CPU0: stopping
+  CPU: 0 PID: 0 Comm: swapper/0 Not tainted 3.15.0-rc8-next-20140603 #1
+  [<c0215a54>] (unwind_backtrace) from [<c021160c>] (show_stack+0x10/0x14)
+  [<c021160c>] (show_stack) from [<c0802500>] (dump_stack+0x78/0x94)
+  [<c0802500>] (dump_stack) from [<c021429c>] (handle_IPI+0x138/0x174)
+  [<c021429c>] (handle_IPI) from [<c02087f0>] (armada_370_xp_handle_irq+0xb0/0xcc)
+  [<c02087f0>] (armada_370_xp_handle_irq) from [<c0212100>] (__irq_svc+0x40/0x50)
+  Exception stack(0xc0b6bf68 to 0xc0b6bfb0)
+  bf60:                   e9fad598 00000000 00f509a3 00000000 c0b6a000 c0b724c4
+  bf80: c0b72458 c0b6a000 00000000 00000000 c0b66da0 c0b6a000 00000000 c0b6bfb0
+  bfa0: c027bb94 c027bb24 60000313 ffffffff
+  [<c0212100>] (__irq_svc) from [<c027bb24>] (cpu_startup_entry+0x54/0x214)
+  [<c027bb24>] (cpu_startup_entry) from [<c0ac5b30>] (start_kernel+0x318/0x37c)
+  [<c0ac5b30>] (start_kernel) from [<00208078>] (0x208078)
+  ---[ end Kernel panic - not syncing: Out of memory and no killable processes...
+
+A similar failure will also occur if ARM_ATAG_DTB_COMPAT isn't selected.
+
+Fix this by setting a sane default (1 GB) in the dts file.
+
+Signed-off-by: Jason Cooper <jason@lakedaemon.net>
+Tested-by: Kevin Hilman <khilman@linaro.org>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
++++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+@@ -23,7 +23,7 @@
+       memory {
+               device_type = "memory";
+-              reg = <0 0x00000000 0 0xC0000000>; /* 3 GB */
++              reg = <0 0x00000000 0 0x40000000>; /* 1 GB soldered on */
+       };
+       soc {
diff --git a/queue-3.14/arm64-dma-removing-arch_has_dma_get_required_mask-macro.patch b/queue-3.14/arm64-dma-removing-arch_has_dma_get_required_mask-macro.patch
new file mode 100644 (file)
index 0000000..b52ea2d
--- /dev/null
@@ -0,0 +1,32 @@
+From f3a183cb422574014538017b5b291a416396f97e Mon Sep 17 00:00:00 2001
+From: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
+Date: Fri, 6 Jun 2014 23:07:16 +0100
+Subject: arm64/dma: Removing ARCH_HAS_DMA_GET_REQUIRED_MASK macro
+
+From: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
+
+commit f3a183cb422574014538017b5b291a416396f97e upstream.
+
+arm64 does not define the dma_get_required_mask() function.
+Therefore, it should not define the ARCH_HAS_DMA_GET_REQUIRED_MASK macro.
+Doing so causes build errors in some device drivers (e.g. mpt2sas).
+
+Signed-off-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/dma-mapping.h |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/arch/arm64/include/asm/dma-mapping.h
++++ b/arch/arm64/include/asm/dma-mapping.h
+@@ -26,8 +26,6 @@
+ #include <xen/xen.h>
+ #include <asm/xen/hypervisor.h>
+-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+-
+ #define DMA_ERROR_CODE        (~(dma_addr_t)0)
+ extern struct dma_map_ops *dma_ops;
diff --git a/queue-3.14/btrfs-add-ctime-mtime-update-for-btrfs-device-add-remove.patch b/queue-3.14/btrfs-add-ctime-mtime-update-for-btrfs-device-add-remove.patch
new file mode 100644 (file)
index 0000000..96ad640
--- /dev/null
@@ -0,0 +1,76 @@
+From 5a1972bd9fd4b2fb1bac8b7a0b636d633d8717e3 Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <quwenruo@cn.fujitsu.com>
+Date: Wed, 16 Apr 2014 17:02:32 +0800
+Subject: btrfs: Add ctime/mtime update for btrfs device add/remove.
+
+From: Qu Wenruo <quwenruo@cn.fujitsu.com>
+
+commit 5a1972bd9fd4b2fb1bac8b7a0b636d633d8717e3 upstream.
+
+Btrfs sends a uevent to udev to report the device change,
+but the ctime/mtime of the block device inode is not updated. This causes
+libblkid, used by btrfs-progs, to miss the device change and reuse its old
+cache, so 'btrfs dev scan; btrfs dev remove; btrfs dev scan' gives an
+error message.
+
+Reported-by: Tsutomu Itoh <t-itoh@jp.fujitsu.com>
+Cc: Karel Zak <kzak@redhat.com>
+Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/volumes.c |   26 ++++++++++++++++++++++++--
+ 1 file changed, 24 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1438,6 +1438,22 @@ out:
+       return ret;
+ }
++/*
++ * Function to update ctime/mtime for a given device path.
++ * Mainly used for ctime/mtime based probe like libblkid.
++ */
++static void update_dev_time(char *path_name)
++{
++      struct file *filp;
++
++      filp = filp_open(path_name, O_RDWR, 0);
++      if (!filp)
++              return;
++      file_update_time(filp);
++      filp_close(filp, NULL);
++      return;
++}
++
+ static int btrfs_rm_dev_item(struct btrfs_root *root,
+                            struct btrfs_device *device)
+ {
+@@ -1690,10 +1706,14 @@ int btrfs_rm_device(struct btrfs_root *r
+       ret = 0;
+-      /* Notify udev that device has changed */
+-      if (bdev)
++      if (bdev) {
++              /* Notify udev that device has changed */
+               btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
++              /* Update ctime/mtime for device path for libblkid */
++              update_dev_time(device_path);
++      }
++
+ error_brelse:
+       brelse(bh);
+       if (bdev)
+@@ -2132,6 +2152,8 @@ int btrfs_init_new_device(struct btrfs_r
+               ret = btrfs_commit_transaction(trans, root);
+       }
++      /* Update ctime/mtime for libblkid */
++      update_dev_time(device_path);
+       return ret;
+ error_trans:
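Editor's aside, not part of the queued patch: tools built on libblkid use the block device node's ctime/mtime to decide whether a cached probe is stale, which is why the patch bumps those times on device add/remove. A small, hypothetical userspace check of the timestamps in question:

    #include <stdio.h>
    #include <sys/stat.h>

    int main(int argc, char **argv)
    {
            struct stat st;

            if (argc < 2 || stat(argv[1], &st) != 0) {
                    fprintf(stderr, "usage: %s /dev/<block-device>\n", argv[0]);
                    return 1;
            }
            /* With the fix applied, 'btrfs device add/remove' bumps these,
             * so a stale cache entry for the device can be detected. */
            printf("%s mtime=%ld ctime=%ld\n", argv[1],
                   (long)st.st_mtime, (long)st.st_ctime);
            return 0;
    }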
diff --git a/queue-3.14/btrfs-allocate-raid-type-kobjects-dynamically.patch b/queue-3.14/btrfs-allocate-raid-type-kobjects-dynamically.patch
new file mode 100644 (file)
index 0000000..1ddb612
--- /dev/null
@@ -0,0 +1,179 @@
+From c1895442be01c58449e3bf9272f22062a670e08f Mon Sep 17 00:00:00 2001
+From: Jeff Mahoney <jeffm@suse.com>
+Date: Tue, 27 May 2014 12:59:57 -0400
+Subject: btrfs: allocate raid type kobjects dynamically
+
+From: Jeff Mahoney <jeffm@suse.com>
+
+commit c1895442be01c58449e3bf9272f22062a670e08f upstream.
+
+We are currently allocating the per-raid-type kobjects in an array inside
+the space_info when we allocate the space_info. When a user does something like:
+
+# btrfs balance start -mconvert=raid1 -dconvert=raid1 /mnt
+# btrfs balance start -mconvert=single -dconvert=single /mnt -f
+# btrfs balance start -mconvert=raid1 -dconvert=raid1 /
+
+We can end up with memory corruption since the kobject hasn't
+been reinitialized properly and the name pointer was left set.
+
+The rationale behind allocating them statically was to avoid
+creating a separate kobject container that just contained the
+raid type. It used the index in the array to determine the raid type.
+
+Ultimately, though, this wastes more memory than it saves in all
+but the most complex scenarios and introduces kobject lifetime
+questions.
+
+This patch allocates the kobjects dynamically instead. Note that
+we also remove the kobject_get/put of the parent kobject since
+kobject_add and kobject_del do that internally.
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+Reported-by: David Sterba <dsterba@suse.cz>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/ctree.h       |    8 +++++++-
+ fs/btrfs/extent-tree.c |   39 ++++++++++++++++++++++++++-------------
+ fs/btrfs/sysfs.c       |    5 +++--
+ 3 files changed, 36 insertions(+), 16 deletions(-)
+
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -1104,6 +1104,12 @@ struct btrfs_qgroup_limit_item {
+       __le64 rsv_excl;
+ } __attribute__ ((__packed__));
++/* For raid type sysfs entries */
++struct raid_kobject {
++      int raid_type;
++      struct kobject kobj;
++};
++
+ struct btrfs_space_info {
+       spinlock_t lock;
+@@ -1154,7 +1160,7 @@ struct btrfs_space_info {
+       wait_queue_head_t wait;
+       struct kobject kobj;
+-      struct kobject block_group_kobjs[BTRFS_NR_RAID_TYPES];
++      struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];
+ };
+ #define       BTRFS_BLOCK_RSV_GLOBAL          1
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3400,10 +3400,8 @@ static int update_space_info(struct btrf
+               return ret;
+       }
+-      for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
++      for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
+               INIT_LIST_HEAD(&found->block_groups[i]);
+-              kobject_init(&found->block_group_kobjs[i], &btrfs_raid_ktype);
+-      }
+       init_rwsem(&found->groups_sem);
+       spin_lock_init(&found->lock);
+       found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
+@@ -8328,8 +8326,9 @@ int btrfs_free_block_groups(struct btrfs
+               list_del(&space_info->list);
+               for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
+                       struct kobject *kobj;
+-                      kobj = &space_info->block_group_kobjs[i];
+-                      if (kobj->parent) {
++                      kobj = space_info->block_group_kobjs[i];
++                      space_info->block_group_kobjs[i] = NULL;
++                      if (kobj) {
+                               kobject_del(kobj);
+                               kobject_put(kobj);
+                       }
+@@ -8353,17 +8352,26 @@ static void __link_block_group(struct bt
+       up_write(&space_info->groups_sem);
+       if (first) {
+-              struct kobject *kobj = &space_info->block_group_kobjs[index];
++              struct raid_kobject *rkobj;
+               int ret;
+-              kobject_get(&space_info->kobj); /* put in release */
+-              ret = kobject_add(kobj, &space_info->kobj, "%s",
+-                                get_raid_name(index));
++              rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
++              if (!rkobj)
++                      goto out_err;
++              rkobj->raid_type = index;
++              kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
++              ret = kobject_add(&rkobj->kobj, &space_info->kobj,
++                                "%s", get_raid_name(index));
+               if (ret) {
+-                      pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
+-                      kobject_put(&space_info->kobj);
++                      kobject_put(&rkobj->kobj);
++                      goto out_err;
+               }
++              space_info->block_group_kobjs[index] = &rkobj->kobj;
+       }
++
++      return;
++out_err:
++      pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
+ }
+ static struct btrfs_block_group_cache *
+@@ -8698,6 +8706,7 @@ int btrfs_remove_block_group(struct btrf
+       struct btrfs_root *tree_root = root->fs_info->tree_root;
+       struct btrfs_key key;
+       struct inode *inode;
++      struct kobject *kobj = NULL;
+       int ret;
+       int index;
+       int factor;
+@@ -8797,11 +8806,15 @@ int btrfs_remove_block_group(struct btrf
+        */
+       list_del_init(&block_group->list);
+       if (list_empty(&block_group->space_info->block_groups[index])) {
+-              kobject_del(&block_group->space_info->block_group_kobjs[index]);
+-              kobject_put(&block_group->space_info->block_group_kobjs[index]);
++              kobj = block_group->space_info->block_group_kobjs[index];
++              block_group->space_info->block_group_kobjs[index] = NULL;
+               clear_avail_alloc_bits(root->fs_info, block_group->flags);
+       }
+       up_write(&block_group->space_info->groups_sem);
++      if (kobj) {
++              kobject_del(kobj);
++              kobject_put(kobj);
++      }
+       if (block_group->cached == BTRFS_CACHE_STARTED)
+               wait_block_group_cache_done(block_group);
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -253,6 +253,7 @@ static ssize_t global_rsv_reserved_show(
+ BTRFS_ATTR(global_rsv_reserved, 0444, global_rsv_reserved_show);
+ #define to_space_info(_kobj) container_of(_kobj, struct btrfs_space_info, kobj)
++#define to_raid_kobj(_kobj) container_of(_kobj, struct raid_kobject, kobj)
+ static ssize_t raid_bytes_show(struct kobject *kobj,
+                              struct kobj_attribute *attr, char *buf);
+@@ -265,7 +266,7 @@ static ssize_t raid_bytes_show(struct ko
+ {
+       struct btrfs_space_info *sinfo = to_space_info(kobj->parent);
+       struct btrfs_block_group_cache *block_group;
+-      int index = kobj - sinfo->block_group_kobjs;
++      int index = to_raid_kobj(kobj)->raid_type;
+       u64 val = 0;
+       down_read(&sinfo->groups_sem);
+@@ -287,7 +288,7 @@ static struct attribute *raid_attributes
+ static void release_raid_kobj(struct kobject *kobj)
+ {
+-      kobject_put(kobj->parent);
++      kfree(to_raid_kobj(kobj));
+ }
+ struct kobj_type btrfs_raid_ktype = {
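Editor's aside, not part of the queued patch: the ownership pattern the patch switches to — a refcounted object embedded in a small heap-allocated wrapper whose release callback frees the wrapper via container_of — can be sketched in plain C (a simplified analogy, not the kernel kobject API):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kobj_stub {
            void (*release)(struct kobj_stub *k);
    };

    struct raid_kobject_stub {
            int raid_type;
            struct kobj_stub kobj;
    };

    /* Release frees the whole wrapper, so re-registering the same raid
     * type later starts from a freshly allocated, zeroed object. */
    static void release_raid_kobj(struct kobj_stub *k)
    {
            free(container_of(k, struct raid_kobject_stub, kobj));
    }

    int main(void)
    {
            struct raid_kobject_stub *rkobj = calloc(1, sizeof(*rkobj));

            if (!rkobj)
                    return 1;
            rkobj->raid_type = 1;
            rkobj->kobj.release = release_raid_kobj;
            printf("registered raid type %d\n", rkobj->raid_type);
            rkobj->kobj.release(&rkobj->kobj);      /* last put: wrapper goes away in one piece */
            return 0;
    }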
diff --git a/queue-3.14/btrfs-don-t-check-nodes-for-extent-items.patch b/queue-3.14/btrfs-don-t-check-nodes-for-extent-items.patch
new file mode 100644 (file)
index 0000000..524de9c
--- /dev/null
@@ -0,0 +1,40 @@
+From 8a56457f5f8fa7c2698ffae8545214c5b96a2cb5 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fb.com>
+Date: Thu, 5 Jun 2014 16:08:45 -0400
+Subject: Btrfs: don't check nodes for extent items
+
+From: Josef Bacik <jbacik@fb.com>
+
+commit 8a56457f5f8fa7c2698ffae8545214c5b96a2cb5 upstream.
+
+The backref code was looking at nodes as well as leaves when we tried to
+populate extent item entries.  This is not good, and although we get away with it
+for the most part because we'd skip where disk_bytenr != random_memory,
+sometimes random_memory would match and suddenly boom.  This fixes that problem.
+Thanks,
+
+Signed-off-by: Josef Bacik <jbacik@fb.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/backref.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -972,11 +972,12 @@ again:
+                               goto out;
+               }
+               if (ref->count && ref->parent) {
+-                      if (extent_item_pos && !ref->inode_list) {
++                      if (extent_item_pos && !ref->inode_list &&
++                          ref->level == 0) {
+                               u32 bsz;
+                               struct extent_buffer *eb;
+                               bsz = btrfs_level_size(fs_info->extent_root,
+-                                                      info_level);
++                                                      ref->level);
+                               eb = read_tree_block(fs_info->extent_root,
+                                                          ref->parent, bsz, 0);
+                               if (!eb || !extent_buffer_uptodate(eb)) {
diff --git a/queue-3.14/btrfs-fix-double-free-in-find_lock_delalloc_range.patch b/queue-3.14/btrfs-fix-double-free-in-find_lock_delalloc_range.patch
new file mode 100644 (file)
index 0000000..73bc2ee
--- /dev/null
@@ -0,0 +1,29 @@
+From 7d78874273463a784759916fc3e0b4e2eb141c70 Mon Sep 17 00:00:00 2001
+From: Chris Mason <clm@fb.com>
+Date: Wed, 21 May 2014 05:49:54 -0700
+Subject: Btrfs: fix double free in find_lock_delalloc_range
+
+From: Chris Mason <clm@fb.com>
+
+commit 7d78874273463a784759916fc3e0b4e2eb141c70 upstream.
+
+We need to NULL the cached_state after freeing it, otherwise
+we might free it again if find_delalloc_range doesn't find anything.
+
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/extent_io.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1686,6 +1686,7 @@ again:
+                * shortening the size of the delalloc range we're searching
+                */
+               free_extent_state(cached_state);
++              cached_state = NULL;
+               if (!loops) {
+                       max_bytes = PAGE_CACHE_SIZE;
+                       loops = 1;
diff --git a/queue-3.14/btrfs-fix-lockdep-warning-with-reclaim-lock-inversion.patch b/queue-3.14/btrfs-fix-lockdep-warning-with-reclaim-lock-inversion.patch
new file mode 100644 (file)
index 0000000..201313f
--- /dev/null
@@ -0,0 +1,79 @@
+From ed55b6ac077fe7f9c6490ff55172c4b563562d7c Mon Sep 17 00:00:00 2001
+From: Jeff Mahoney <jeffm@suse.com>
+Date: Wed, 26 Mar 2014 14:11:26 -0400
+Subject: btrfs: fix lockdep warning with reclaim lock inversion
+
+From: Jeff Mahoney <jeffm@suse.com>
+
+commit ed55b6ac077fe7f9c6490ff55172c4b563562d7c upstream.
+
+When encountering memory pressure, testers have run into the following
+lockdep warning. It was caused by __link_block_group calling kobject_add
+with the groups_sem held. kobject_add calls kvasprintf with GFP_KERNEL,
+which gets us into reclaim context. The kobject doesn't actually need
+to be added under the lock -- it just needs to ensure that it's only
+added for the first block group to be linked.
+
+=========================================================
+[ INFO: possible irq lock inversion dependency detected ]
+3.14.0-rc8-default #1 Not tainted
+---------------------------------------------------------
+kswapd0/169 just changed the state of lock:
+ (&delayed_node->mutex){+.+.-.}, at: [<ffffffffa018baea>] __btrfs_release_delayed_node+0x3a/0x200 [btrfs]
+but this lock took another, RECLAIM_FS-unsafe lock in the past:
+ (&found->groups_sem){+++++.}
+
+and interrupts could create inverse lock ordering between them.
+
+other info that might help us debug this:
+ Possible interrupt unsafe locking scenario:
+       CPU0                    CPU1
+       ----                    ----
+  lock(&found->groups_sem);
+                               local_irq_disable();
+                               lock(&delayed_node->mutex);
+                               lock(&found->groups_sem);
+  <Interrupt>
+    lock(&delayed_node->mutex);
+
+ *** DEADLOCK ***
+2 locks held by kswapd0/169:
+ #0:  (shrinker_rwsem){++++..}, at: [<ffffffff81159e8a>] shrink_slab+0x3a/0x160
+ #1:  (&type->s_umount_key#27){++++..}, at: [<ffffffff811bac6f>] grab_super_passive+0x3f/0x90
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/extent-tree.c |   10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -8344,9 +8344,15 @@ static void __link_block_group(struct bt
+                              struct btrfs_block_group_cache *cache)
+ {
+       int index = get_block_group_index(cache);
++      bool first = false;
+       down_write(&space_info->groups_sem);
+-      if (list_empty(&space_info->block_groups[index])) {
++      if (list_empty(&space_info->block_groups[index]))
++              first = true;
++      list_add_tail(&cache->list, &space_info->block_groups[index]);
++      up_write(&space_info->groups_sem);
++
++      if (first) {
+               struct kobject *kobj = &space_info->block_group_kobjs[index];
+               int ret;
+@@ -8358,8 +8364,6 @@ static void __link_block_group(struct bt
+                       kobject_put(&space_info->kobj);
+               }
+       }
+-      list_add_tail(&cache->list, &space_info->block_groups[index]);
+-      up_write(&space_info->groups_sem);
+ }
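Editor's aside, not part of the queued patch: the reordering above — decide under the lock, allocate and register after dropping it — is a general pattern for keeping allocations that may enter reclaim out of locked sections. A hypothetical userspace sketch (a pthread mutex standing in for groups_sem):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t groups_lock = PTHREAD_MUTEX_INITIALIZER;
    static int group_count;

    static void link_block_group(void)
    {
            bool first;
            char *name;

            pthread_mutex_lock(&groups_lock);
            first = (group_count++ == 0);   /* only cheap bookkeeping under the lock */
            pthread_mutex_unlock(&groups_lock);

            if (!first)
                    return;

            /* Potentially blocking allocation, now done without the lock held. */
            name = malloc(32);
            if (!name)
                    return;
            snprintf(name, 32, "raid-group");
            printf("registered %s\n", name);
            free(name);
    }

    int main(void)
    {
            link_block_group();
            link_block_group();
            return 0;
    }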
+ static struct btrfs_block_group_cache *
diff --git a/queue-3.14/btrfs-fix-null-pointer-crash-of-deleting-a-seed-device.patch b/queue-3.14/btrfs-fix-null-pointer-crash-of-deleting-a-seed-device.patch
new file mode 100644 (file)
index 0000000..aa3cbd1
--- /dev/null
@@ -0,0 +1,43 @@
+From 29cc83f69c8338ff8fd1383c9be263d4bdf52d73 Mon Sep 17 00:00:00 2001
+From: Liu Bo <bo.li.liu@oracle.com>
+Date: Sun, 11 May 2014 23:14:59 +0800
+Subject: Btrfs: fix NULL pointer crash of deleting a seed device
+
+From: Liu Bo <bo.li.liu@oracle.com>
+
+commit 29cc83f69c8338ff8fd1383c9be263d4bdf52d73 upstream.
+
+Same as normal devices, seed devices should be initialized with
+fs_info->dev_root as well, otherwise we'll get a NULL pointer crash.
+
+Cc: Chris Murphy <lists@colorremedies.com>
+Reported-by: Chris Murphy <lists@colorremedies.com>
+Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/volumes.c |   12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -6057,10 +6057,14 @@ void btrfs_init_devices_late(struct btrf
+       struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+       struct btrfs_device *device;
+-      mutex_lock(&fs_devices->device_list_mutex);
+-      list_for_each_entry(device, &fs_devices->devices, dev_list)
+-              device->dev_root = fs_info->dev_root;
+-      mutex_unlock(&fs_devices->device_list_mutex);
++      while (fs_devices) {
++              mutex_lock(&fs_devices->device_list_mutex);
++              list_for_each_entry(device, &fs_devices->devices, dev_list)
++                      device->dev_root = fs_info->dev_root;
++              mutex_unlock(&fs_devices->device_list_mutex);
++
++              fs_devices = fs_devices->seed;
++      }
+ }
+ static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
diff --git a/queue-3.14/btrfs-fix-scrub_print_warning-to-handle-skinny-metadata-extents.patch b/queue-3.14/btrfs-fix-scrub_print_warning-to-handle-skinny-metadata-extents.patch
new file mode 100644 (file)
index 0000000..ccd8bb8
--- /dev/null
@@ -0,0 +1,117 @@
+From 6eda71d0c030af0fc2f68aaa676e6d445600855b Mon Sep 17 00:00:00 2001
+From: Liu Bo <bo.li.liu@oracle.com>
+Date: Mon, 9 Jun 2014 10:54:07 +0800
+Subject: Btrfs: fix scrub_print_warning to handle skinny metadata extents
+
+From: Liu Bo <bo.li.liu@oracle.com>
+
+commit 6eda71d0c030af0fc2f68aaa676e6d445600855b upstream.
+
+The skinny extents are interpreted incorrectly in scrub_print_warning(),
+and end up hitting the BUG() in btrfs_extent_inline_ref_size.
+
+Reported-by: Konstantinos Skarlatos <k.skarlatos@gmail.com>
+Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/backref.c |   30 +++++++++++++++++++-----------
+ fs/btrfs/backref.h |    4 ++--
+ fs/btrfs/scrub.c   |    5 +++--
+ 3 files changed, 24 insertions(+), 15 deletions(-)
+
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1412,9 +1412,10 @@ int extent_from_logical(struct btrfs_fs_
+  * returns <0 on error
+  */
+ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
+-                              struct btrfs_extent_item *ei, u32 item_size,
+-                              struct btrfs_extent_inline_ref **out_eiref,
+-                              int *out_type)
++                                 struct btrfs_key *key,
++                                 struct btrfs_extent_item *ei, u32 item_size,
++                                 struct btrfs_extent_inline_ref **out_eiref,
++                                 int *out_type)
+ {
+       unsigned long end;
+       u64 flags;
+@@ -1424,9 +1425,16 @@ static int __get_extent_inline_ref(unsig
+               /* first call */
+               flags = btrfs_extent_flags(eb, ei);
+               if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+-                      info = (struct btrfs_tree_block_info *)(ei + 1);
+-                      *out_eiref =
+-                              (struct btrfs_extent_inline_ref *)(info + 1);
++                      if (key->type == BTRFS_METADATA_ITEM_KEY) {
++                              /* a skinny metadata extent */
++                              *out_eiref =
++                                   (struct btrfs_extent_inline_ref *)(ei + 1);
++                      } else {
++                              WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
++                              info = (struct btrfs_tree_block_info *)(ei + 1);
++                              *out_eiref =
++                                 (struct btrfs_extent_inline_ref *)(info + 1);
++                      }
+               } else {
+                       *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
+               }
+@@ -1436,7 +1444,7 @@ static int __get_extent_inline_ref(unsig
+       }
+       end = (unsigned long)ei + item_size;
+-      *out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
++      *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
+       *out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);
+       *ptr += btrfs_extent_inline_ref_size(*out_type);
+@@ -1455,8 +1463,8 @@ static int __get_extent_inline_ref(unsig
+  * <0 on error.
+  */
+ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
+-                              struct btrfs_extent_item *ei, u32 item_size,
+-                              u64 *out_root, u8 *out_level)
++                          struct btrfs_key *key, struct btrfs_extent_item *ei,
++                          u32 item_size, u64 *out_root, u8 *out_level)
+ {
+       int ret;
+       int type;
+@@ -1467,8 +1475,8 @@ int tree_backref_for_extent(unsigned lon
+               return 1;
+       while (1) {
+-              ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
+-                                              &eiref, &type);
++              ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size,
++                                            &eiref, &type);
+               if (ret < 0)
+                       return ret;
+--- a/fs/btrfs/backref.h
++++ b/fs/btrfs/backref.h
+@@ -40,8 +40,8 @@ int extent_from_logical(struct btrfs_fs_
+                       u64 *flags);
+ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
+-                              struct btrfs_extent_item *ei, u32 item_size,
+-                              u64 *out_root, u8 *out_level);
++                          struct btrfs_key *key, struct btrfs_extent_item *ei,
++                          u32 item_size, u64 *out_root, u8 *out_level);
+ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
+                               u64 extent_item_objectid,
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -577,8 +577,9 @@ static void scrub_print_warning(const ch
+       if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+               do {
+-                      ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
+-                                                      &ref_root, &ref_level);
++                      ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
++                                                    item_size, &ref_root,
++                                                    &ref_level);
+                       printk_in_rcu(KERN_WARNING
+                               "BTRFS: %s at logical %llu on dev %s, "
+                               "sector %llu: metadata %s (level %d) in tree "
diff --git a/queue-3.14/btrfs-fix-use-of-uninit-ret-in-end_extent_writepage.patch b/queue-3.14/btrfs-fix-use-of-uninit-ret-in-end_extent_writepage.patch
new file mode 100644 (file)
index 0000000..19d5c1d
--- /dev/null
@@ -0,0 +1,45 @@
+From 3e2426bd0eb980648449e7a2f5a23e3cd3c7725c Mon Sep 17 00:00:00 2001
+From: Eric Sandeen <sandeen@redhat.com>
+Date: Thu, 12 Jun 2014 00:39:58 -0500
+Subject: btrfs: fix use of uninit "ret" in end_extent_writepage()
+
+From: Eric Sandeen <sandeen@redhat.com>
+
+commit 3e2426bd0eb980648449e7a2f5a23e3cd3c7725c upstream.
+
+If this condition in end_extent_writepage() is false:
+
+       if (tree->ops && tree->ops->writepage_end_io_hook)
+
+we will then test an uninitialized "ret" at:
+
+       ret = ret < 0 ? ret : -EIO;
+
+The test for ret is for the case where ->writepage_end_io_hook
+failed, and we'd choose that ret as the error; but if
+there is no ->writepage_end_io_hook, nothing sets ret.
+
+Initializing ret to 0 should be sufficient; if
+writepage_end_io_hook wasn't set, (!uptodate) means
+non-zero err was passed in, so we choose -EIO in that case.
+
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/extent_io.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2347,7 +2347,7 @@ int end_extent_writepage(struct page *pa
+ {
+       int uptodate = (err == 0);
+       struct extent_io_tree *tree;
+-      int ret;
++      int ret = 0;
+       tree = &BTRFS_I(page->mapping->host)->io_tree;
diff --git a/queue-3.14/btrfs-make-sure-there-are-not-any-read-requests-before-stopping-workers.patch b/queue-3.14/btrfs-make-sure-there-are-not-any-read-requests-before-stopping-workers.patch
new file mode 100644 (file)
index 0000000..4dd49bf
--- /dev/null
@@ -0,0 +1,44 @@
+From de348ee022175401e77d7662b7ca6e231a94e3fd Mon Sep 17 00:00:00 2001
+From: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+Date: Wed, 9 Apr 2014 19:23:22 +0800
+Subject: Btrfs: make sure there are not any read requests before stopping workers
+
+From: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+
+commit de348ee022175401e77d7662b7ca6e231a94e3fd upstream.
+
+In close_ctree(), after we have stopped all workers, there may still be
+some read requests (for example readahead) to submit, and this *may* trigger
+an oops that a user reported before:
+
+kernel BUG at fs/btrfs/async-thread.c:619!
+
+By hacking the code, I can reproduce this problem with only one CPU available.
+We fix this potential problem by invalidating all btree inode pages before
+stopping all workers.
+
+Thanks to Miao for pointing out this problem.
+
+Signed-off-by: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+Reviewed-by: David Sterba <dsterba@suse.cz>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/disk-io.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3598,6 +3598,11 @@ int close_ctree(struct btrfs_root *root)
+       btrfs_free_block_groups(fs_info);
++      /*
++       * we must make sure there is not any read request to
++       * submit after we stopping all workers.
++       */
++      invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
+       btrfs_stop_all_workers(fs_info);
+       free_root_pointers(fs_info, 1);
diff --git a/queue-3.14/btrfs-mark-mapping-with-error-flag-to-report-errors-to-userspace.patch b/queue-3.14/btrfs-mark-mapping-with-error-flag-to-report-errors-to-userspace.patch
new file mode 100644 (file)
index 0000000..ec708c7
--- /dev/null
@@ -0,0 +1,34 @@
+From 5dca6eea91653e9949ce6eb9e9acab6277e2f2c4 Mon Sep 17 00:00:00 2001
+From: Liu Bo <bo.li.liu@oracle.com>
+Date: Mon, 12 May 2014 12:47:36 +0800
+Subject: Btrfs: mark mapping with error flag to report errors to userspace
+
+From: Liu Bo <bo.li.liu@oracle.com>
+
+commit 5dca6eea91653e9949ce6eb9e9acab6277e2f2c4 upstream.
+
+According to commit 865ffef3797da2cac85b3354b5b6050dc9660978
+(fs: fix fsync() error reporting),
+it's not reliable to just check error pages because pages can be
+truncated or invalidated; we should also mark the mapping with the error
+flag so that a later fsync can catch the error.
+
+Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/extent_io.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2361,6 +2361,8 @@ int end_extent_writepage(struct page *pa
+       if (!uptodate) {
+               ClearPageUptodate(page);
+               SetPageError(page);
++              ret = ret < 0 ? ret : -EIO;
++              mapping_set_error(page->mapping, ret);
+       }
+       return 0;
+ }
diff --git a/queue-3.14/btrfs-output-warning-instead-of-error-when-loading-free-space-cache-failed.patch b/queue-3.14/btrfs-output-warning-instead-of-error-when-loading-free-space-cache-failed.patch
new file mode 100644 (file)
index 0000000..2e370f6
--- /dev/null
@@ -0,0 +1,42 @@
+From 32d6b47fe6fc1714d5f1bba1b9f38e0ab0ad58a8 Mon Sep 17 00:00:00 2001
+From: Miao Xie <miaox@cn.fujitsu.com>
+Date: Thu, 24 Apr 2014 13:31:55 +0800
+Subject: Btrfs: output warning instead of error when loading free space cache failed
+
+From: Miao Xie <miaox@cn.fujitsu.com>
+
+commit 32d6b47fe6fc1714d5f1bba1b9f38e0ab0ad58a8 upstream.
+
+If we fail to load a free space cache, we can rebuild it from the extent tree,
+so it is not a serious error, and we should not output an error message that
+would make users uncomfortable. This patch uses a warning message instead
+of an error.
+
+Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/free-space-cache.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -831,7 +831,7 @@ int load_free_space_cache(struct btrfs_f
+       if (!matched) {
+               __btrfs_remove_free_space_cache(ctl);
+-              btrfs_err(fs_info, "block group %llu has wrong amount of free space",
++              btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
+                       block_group->key.objectid);
+               ret = -1;
+       }
+@@ -843,7 +843,7 @@ out:
+               spin_unlock(&block_group->lock);
+               ret = 0;
+-              btrfs_err(fs_info, "failed to load free space cache for block group %llu",
++              btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now",
+                       block_group->key.objectid);
+       }
diff --git a/queue-3.14/btrfs-send-don-t-error-in-the-presence-of-subvols-snapshots.patch b/queue-3.14/btrfs-send-don-t-error-in-the-presence-of-subvols-snapshots.patch
new file mode 100644 (file)
index 0000000..cb2c705
--- /dev/null
@@ -0,0 +1,55 @@
+From 1af56070e3ef9477dbc7eba3b9ad7446979c7974 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@gmail.com>
+Date: Sun, 25 May 2014 04:49:24 +0100
+Subject: Btrfs: send, don't error in the presence of subvols/snapshots
+
+From: Filipe Manana <fdmanana@gmail.com>
+
+commit 1af56070e3ef9477dbc7eba3b9ad7446979c7974 upstream.
+
+If we are doing an incremental send and the base snapshot has a
+directory with name X that doesn't exist anymore in the second
+snapshot and a new subvolume/snapshot exists in the second snapshot
+that has the same name as the directory (name X), the incremental
+send would fail with an -ENOENT error. This is because it attempts
+to look up an inode with a number matching the objectid of a
+root, which doesn't exist.
+
+Steps to reproduce:
+
+    mkfs.btrfs -f /dev/sdd
+    mount /dev/sdd /mnt
+
+    mkdir /mnt/testdir
+    btrfs subvolume snapshot -r /mnt /mnt/mysnap1
+
+    rmdir /mnt/testdir
+    btrfs subvolume create /mnt/testdir
+    btrfs subvolume snapshot -r /mnt /mnt/mysnap2
+
+    btrfs send -p /mnt/mysnap1 /mnt/mysnap2 -f /tmp/send.data
+
+A test case for xfstests follows.
+
+Reported-by: Robert White <rwhite@pobox.com>
+Signed-off-by: Filipe David Borba Manana <fdmanana@gmail.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/send.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -1589,6 +1589,10 @@ static int lookup_dir_item_inode(struct
+               goto out;
+       }
+       btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
++      if (key.type == BTRFS_ROOT_ITEM_KEY) {
++              ret = -ENOENT;
++              goto out;
++      }
+       *found_inode = key.objectid;
+       *found_type = btrfs_dir_type(path->nodes[0], di);
diff --git a/queue-3.14/btrfs-set-right-total-device-count-for-seeding-support.patch b/queue-3.14/btrfs-set-right-total-device-count-for-seeding-support.patch
new file mode 100644 (file)
index 0000000..5fbd682
--- /dev/null
@@ -0,0 +1,45 @@
+From 298658414a2f0bea1f05a81876a45c1cd96aa2e0 Mon Sep 17 00:00:00 2001
+From: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+Date: Tue, 13 May 2014 17:05:06 +0800
+Subject: Btrfs: set right total device count for seeding support
+
+From: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+
+commit 298658414a2f0bea1f05a81876a45c1cd96aa2e0 upstream.
+
+Seeding device support allows us to create a new filesystem
+based on an existing filesystem.
+
+However, the newly created filesystem's @total_devices should include seed
+devices. This patch fixes the following problem:
+
+ # mkfs.btrfs -f /dev/sdb
+ # btrfstune -S 1 /dev/sdb
+ # mount /dev/sdb /mnt
+ # btrfs device add -f /dev/sdc /mnt --->fs_devices->total_devices = 1
+ # umount /mnt
+ # mount /dev/sdc /mnt               --->fs_devices->total_devices = 2
+
+This is because we record the right @total_devices in the superblock, but
+@fs_devices->total_devices is reset to 0 in btrfs_prepare_sprout().
+
+Fix this problem by not resetting @fs_devices->total_devices.
+
+Signed-off-by: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/volumes.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1889,7 +1889,6 @@ static int btrfs_prepare_sprout(struct b
+       fs_devices->seeding = 0;
+       fs_devices->num_devices = 0;
+       fs_devices->open_devices = 0;
+-      fs_devices->total_devices = 0;
+       fs_devices->seed = seed_devices;
+       generate_random_uuid(fs_devices->fsid);
diff --git a/queue-3.14/btrfs-use-right-type-to-get-real-comparison.patch b/queue-3.14/btrfs-use-right-type-to-get-real-comparison.patch
new file mode 100644 (file)
index 0000000..f852035
--- /dev/null
@@ -0,0 +1,31 @@
+From cd857dd6bc2ae9ecea14e75a34e8a8fdc158e307 Mon Sep 17 00:00:00 2001
+From: Liu Bo <bo.li.liu@oracle.com>
+Date: Sun, 8 Jun 2014 19:04:13 +0800
+Subject: Btrfs: use right type to get real comparison
+
+From: Liu Bo <bo.li.liu@oracle.com>
+
+commit cd857dd6bc2ae9ecea14e75a34e8a8fdc158e307 upstream.
+
+We want to make sure the pointer is still within the extent item, not to verify
+the memory it's pointing to.
+
+Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/backref.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1431,7 +1431,7 @@ static int __get_extent_inline_ref(unsig
+                       *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
+               }
+               *ptr = (unsigned long)*out_eiref;
+-              if ((void *)*ptr >= (void *)ei + item_size)
++              if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
+                       return -ENOENT;
+       }
diff --git a/queue-3.14/cifs-fix-memory-leaks-in-smb2_open.patch b/queue-3.14/cifs-fix-memory-leaks-in-smb2_open.patch
new file mode 100644 (file)
index 0000000..49862c7
--- /dev/null
@@ -0,0 +1,57 @@
+From 663a962151593c69374776e8651238d0da072459 Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <pshilovsky@samba.org>
+Date: Sat, 24 May 2014 16:42:02 +0400
+Subject: CIFS: Fix memory leaks in SMB2_open
+
+From: Pavel Shilovsky <pshilovsky@samba.org>
+
+commit 663a962151593c69374776e8651238d0da072459 upstream.
+
+Signed-off-by: Pavel Shilovsky <pshilovsky@samba.org>
+Reviewed-by: Shirish Pargaonkar <spargaonkar@suse.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2pdu.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1089,6 +1089,7 @@ SMB2_open(const unsigned int xid, struct
+       int rc = 0;
+       unsigned int num_iovecs = 2;
+       __u32 file_attributes = 0;
++      char *dhc_buf = NULL, *lc_buf = NULL;
+       cifs_dbg(FYI, "create/open\n");
+@@ -1155,6 +1156,7 @@ SMB2_open(const unsigned int xid, struct
+                       kfree(copy_path);
+                       return rc;
+               }
++              lc_buf = iov[num_iovecs-1].iov_base;
+       }
+       if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
+@@ -1169,9 +1171,10 @@ SMB2_open(const unsigned int xid, struct
+               if (rc) {
+                       cifs_small_buf_release(req);
+                       kfree(copy_path);
+-                      kfree(iov[num_iovecs-1].iov_base);
++                      kfree(lc_buf);
+                       return rc;
+               }
++              dhc_buf = iov[num_iovecs-1].iov_base;
+       }
+       rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
+@@ -1203,6 +1206,8 @@ SMB2_open(const unsigned int xid, struct
+               *oplock = rsp->OplockLevel;
+ creat_exit:
+       kfree(copy_path);
++      kfree(lc_buf);
++      kfree(dhc_buf);
+       free_rsp_buf(resp_buftype, rsp);
+       return rc;
+ }
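Editor's aside, not part of the queued patch: the fix keeps a named pointer for each optional allocation so that a single exit label can free everything. The same cleanup pattern in a hypothetical standalone C sketch:

    #include <stdlib.h>

    static int do_open(int want_lease, int want_durable)
    {
            char *lc_buf = NULL, *dhc_buf = NULL;
            int rc = 0;

            if (want_lease) {
                    lc_buf = malloc(64);
                    if (!lc_buf) {
                            rc = -1;
                            goto out;
                    }
            }
            if (want_durable) {
                    dhc_buf = malloc(64);
                    if (!dhc_buf) {
                            rc = -1;
                            goto out;
                    }
            }

            /* ... build and send the request using lc_buf / dhc_buf ... */

    out:
            free(lc_buf);           /* free(NULL) is a no-op, so both paths are safe */
            free(dhc_buf);
            return rc;
    }

    int main(void)
    {
            return do_open(1, 1) ? 1 : 0;
    }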
diff --git a/queue-3.14/fs-btrfs-volumes.c-fix-for-possible-null-pointer-dereference.patch b/queue-3.14/fs-btrfs-volumes.c-fix-for-possible-null-pointer-dereference.patch
new file mode 100644 (file)
index 0000000..5563e84
--- /dev/null
@@ -0,0 +1,38 @@
+From 8321cf2596d283821acc466377c2b85bcd3422b7 Mon Sep 17 00:00:00 2001
+From: Rickard Strandqvist <rickard_strandqvist@spectrumdigital.se>
+Date: Thu, 22 May 2014 22:43:43 +0200
+Subject: fs: btrfs: volumes.c: Fix for possible null pointer dereference
+
+From: Rickard Strandqvist <rickard_strandqvist@spectrumdigital.se>
+
+commit 8321cf2596d283821acc466377c2b85bcd3422b7 upstream.
+
+There is otherwise a risk of a possible null pointer dereference.
+
+This was largely found by using a static code analysis program called cppcheck.
+
+Signed-off-by: Rickard Strandqvist <rickard_strandqvist@spectrumdigital.se>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/volumes.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1676,11 +1676,12 @@ int btrfs_rm_device(struct btrfs_root *r
+               struct btrfs_fs_devices *fs_devices;
+               fs_devices = root->fs_info->fs_devices;
+               while (fs_devices) {
+-                      if (fs_devices->seed == cur_devices)
++                      if (fs_devices->seed == cur_devices) {
++                              fs_devices->seed = cur_devices->seed;
+                               break;
++                      }
+                       fs_devices = fs_devices->seed;
+               }
+-              fs_devices->seed = cur_devices->seed;
+               cur_devices->seed = NULL;
+               lock_chunks(root);
+               __btrfs_close_devices(cur_devices);
diff --git a/queue-3.14/genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch b/queue-3.14/genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch
new file mode 100644 (file)
index 0000000..75e7003
--- /dev/null
@@ -0,0 +1,216 @@
+From 1e77d0a1ed7417d2a5a52a7b8d32aea1833faa6c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 7 Mar 2013 14:53:45 +0100
+Subject: genirq: Sanitize spurious interrupt detection of threaded irqs
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 1e77d0a1ed7417d2a5a52a7b8d32aea1833faa6c upstream.
+
+Till reported that the spurious interrupt detection of threaded
+interrupts is broken in two ways:
+
+- note_interrupt() is called for each action thread of a shared
+  interrupt line. That's wrong as we are only interested in whether none
+  of the device drivers felt responsible for the interrupt, but by
+  calling it multiple times for a single interrupt line we account
+  IRQ_NONE even if one of the drivers felt responsible.
+
+- note_interrupt() when called from the thread handler is not
+  serialized. That leaves the members of irq_desc which are used for
+  the spurious detection unprotected.
+
+To solve this we need to defer the spurious detection of a threaded
+interrupt to the next hardware interrupt context where we have
+implicit serialization.
+
+If note_interrupt is called with action_ret == IRQ_WAKE_THREAD, we
+check whether the previous interrupt requested a deferred check. If
+not, we request a deferred check for the next hardware interrupt and
+return.
+
+If set, we check whether one of the interrupt threads signaled
+success. Depending on this information we feed the result into the
+spurious detector.
+
+If one primary handler of a shared interrupt returns IRQ_HANDLED we
+disable the deferred check of irq threads on the same line, as we have
+found at least one device driver who cared.
+
+Reported-by: Till Straumann <strauman@slac.stanford.edu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Austin Schuh <austin@peloton-tech.com>
+Cc: Oliver Hartkopp <socketcan@hartkopp.net>
+Cc: Wolfgang Grandegger <wg@grandegger.com>
+Cc: Pavel Pisa <pisa@cmp.felk.cvut.cz>
+Cc: Marc Kleine-Budde <mkl@pengutronix.de>
+Cc: linux-can@vger.kernel.org
+Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1303071450130.22263@ionos
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/irqdesc.h |    4 +
+ kernel/irq/manage.c     |    4 -
+ kernel/irq/spurious.c   |  106 ++++++++++++++++++++++++++++++++++++++++++++++--
+ 3 files changed, 108 insertions(+), 6 deletions(-)
+
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -27,6 +27,8 @@ struct irq_desc;
+  * @irq_count:                stats field to detect stalled irqs
+  * @last_unhandled:   aging timer for unhandled count
+  * @irqs_unhandled:   stats field for spurious unhandled interrupts
++ * @threads_handled:  stats field for deferred spurious detection of threaded handlers
++ * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers
+  * @lock:             locking for SMP
+  * @affinity_hint:    hint to user space for preferred irq affinity
+  * @affinity_notify:  context for notification of affinity changes
+@@ -52,6 +54,8 @@ struct irq_desc {
+       unsigned int            irq_count;      /* For detecting broken IRQs */
+       unsigned long           last_unhandled; /* Aging timer for unhandled count */
+       unsigned int            irqs_unhandled;
++      atomic_t                threads_handled;
++      int                     threads_handled_last;
+       raw_spinlock_t          lock;
+       struct cpumask          *percpu_enabled;
+ #ifdef CONFIG_SMP
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -856,8 +856,8 @@ static int irq_thread(void *data)
+               irq_thread_check_affinity(desc, action);
+               action_ret = handler_fn(desc, action);
+-              if (!noirqdebug)
+-                      note_interrupt(action->irq, desc, action_ret);
++              if (action_ret == IRQ_HANDLED)
++                      atomic_inc(&desc->threads_handled);
+               wake_threads_waitq(desc);
+       }
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -270,6 +270,8 @@ try_misrouted_irq(unsigned int irq, stru
+       return action && (action->flags & IRQF_IRQPOLL);
+ }
++#define SPURIOUS_DEFERRED     0x80000000
++
+ void note_interrupt(unsigned int irq, struct irq_desc *desc,
+                   irqreturn_t action_ret)
+ {
+@@ -277,15 +279,111 @@ void note_interrupt(unsigned int irq, st
+           irq_settings_is_polled(desc))
+               return;
+-      /* we get here again via the threaded handler */
+-      if (action_ret == IRQ_WAKE_THREAD)
+-              return;
+-
+       if (bad_action_ret(action_ret)) {
+               report_bad_irq(irq, desc, action_ret);
+               return;
+       }
++      /*
++       * We cannot call note_interrupt from the threaded handler
++       * because we need to look at the compound of all handlers
++       * (primary and threaded). Aside from that, in the threaded
++       * shared case we have no serialization against an incoming
++       * hardware interrupt while we are dealing with a threaded
++       * result.
++       *
++       * So in case a thread is woken, we just note the fact and
++       * defer the analysis to the next hardware interrupt.
++       *
++       * The threaded handlers store whether they successfully
++       * handled an interrupt and we check whether that number
++       * changed versus the last invocation.
++       *
++       * We could handle all interrupts with the delayed by one
++       * mechanism, but for the non forced threaded case we'd just
++       * add pointless overhead to the straight hardirq interrupts
++       * for the sake of a few lines less code.
++       */
++      if (action_ret & IRQ_WAKE_THREAD) {
++              /*
++               * There is a thread woken. Check whether one of the
++               * shared primary handlers returned IRQ_HANDLED. If
++               * not we defer the spurious detection to the next
++               * interrupt.
++               */
++              if (action_ret == IRQ_WAKE_THREAD) {
++                      int handled;
++                      /*
++                       * We use bit 31 of threads_handled_last to
++                       * denote that the deferred spurious detection
++                       * is active. No locking is necessary as
++                       * threads_handled_last is only accessed here
++                       * and we have the guarantee that hard
++                       * interrupts are not reentrant.
++                       */
++                      if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
++                              desc->threads_handled_last |= SPURIOUS_DEFERRED;
++                              return;
++                      }
++                      /*
++                       * Check whether one of the threaded handlers
++                       * returned IRQ_HANDLED since the last
++                       * interrupt happened.
++                       *
++                       * For simplicity we just set bit 31, as it is
++                       * set in threads_handled_last as well. So we
++                       * avoid extra masking. And we really do not
++                       * care about the high bits of the handled
++                       * count. We just care about the count being
++                       * different than the one we saw before.
++                       */
++                      handled = atomic_read(&desc->threads_handled);
++                      handled |= SPURIOUS_DEFERRED;
++                      if (handled != desc->threads_handled_last) {
++                              action_ret = IRQ_HANDLED;
++                              /*
++                               * Note: We keep the SPURIOUS_DEFERRED
++                               * bit set. We are handling the
++                               * previous invocation right now.
++                               * Keep it for the current one, so the
++                               * next hardware interrupt will
++                               * account for it.
++                               */
++                              desc->threads_handled_last = handled;
++                      } else {
++                              /*
++                               * None of the threaded handlers felt
++                               * responsible for the last interrupt
++                               *
++                               * We keep the SPURIOUS_DEFERRED bit
++                               * set in threads_handled_last as we
++                               * need to account for the current
++                               * interrupt as well.
++                               */
++                              action_ret = IRQ_NONE;
++                      }
++              } else {
++                      /*
++                       * One of the primary handlers returned
++                       * IRQ_HANDLED. So we don't care about the
++                       * threaded handlers on the same line. Clear
++                       * the deferred detection bit.
++                       *
++                       * In theory we could/should check whether the
++                       * deferred bit is set and take the result of
++                       * the previous run into account here as
++                       * well. But it's really not worth the
++                       * trouble. If every other interrupt is
++                       * handled we never trigger the spurious
++                       * detector. And if this is just the one out
++                       * of 100k unhandled ones which is handled
++                       * then we merely delay the spurious detection
++                       * by one hard interrupt. Not a real problem.
++                       */
++                      desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
++              }
++      }
++
+       if (unlikely(action_ret == IRQ_NONE)) {
+               /*
+                * If we are seeing only the odd spurious IRQ caused by
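
The bookkeeping above can be followed outside the kernel: bit 31 of threads_handled_last marks that a deferred check is pending, and the remaining bits are compared against a running count of IRQ_HANDLED returns from the irq threads. The userspace C sketch below mirrors that comparison only; the names (fake_desc, deferred_check) and the harness are invented for illustration and are not part of the patch.

/*
 * Standalone sketch of the deferred spurious-detection bookkeeping.
 * Not kernel code: plain C11 atomics stand in for atomic_t.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SPURIOUS_DEFERRED 0x80000000

struct fake_desc {
        atomic_int threads_handled;      /* bumped by the "thread handlers" */
        int        threads_handled_last; /* touched only in "hardirq" context */
};

/* Returns true if the previous threaded run should count as handled. */
static bool deferred_check(struct fake_desc *desc)
{
        int handled;

        if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
                /* First wakeup: arm the deferred check, decide next time. */
                desc->threads_handled_last |= SPURIOUS_DEFERRED;
                return true;
        }

        handled = atomic_load(&desc->threads_handled) | SPURIOUS_DEFERRED;
        if (handled != desc->threads_handled_last) {
                /* A thread reported IRQ_HANDLED since the last hardirq. */
                desc->threads_handled_last = handled;
                return true;
        }
        return false;                    /* nobody cared: feed IRQ_NONE */
}

int main(void)
{
        struct fake_desc d = { 0 };

        deferred_check(&d);                       /* arms the deferred check */
        atomic_fetch_add(&d.threads_handled, 1);  /* a thread handled the irq */
        printf("thread handled:    %s\n", deferred_check(&d) ? "IRQ_HANDLED" : "IRQ_NONE");
        printf("no thread handled: %s\n", deferred_check(&d) ? "IRQ_HANDLED" : "IRQ_NONE");
        return 0;
}

Run standalone, the second check reports IRQ_HANDLED because the counter moved, and the third, where no thread ran, is the one that feeds IRQ_NONE into the spurious detector.
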
index c795c2ccf55db3f592b76fd08b702786b7627f07..6d6398580fc3be21af36a66dbdecc88efffffaf3 100644 (file)
@@ -80,3 +80,26 @@ target-iscsi-fix-sendtargets-response-pdu-for-iser-transport.patch
 target-report-correct-response-length-for-some-commands.patch
 target-explicitly-clear-ramdisk_mcp-backend-pages.patch
 scsi-fix-spurious-request-sense-in-error-handling.patch
+arm-mvebu-dt-fix-openblocks-ax3-4-ram-size.patch
+arm64-dma-removing-arch_has_dma_get_required_mask-macro.patch
+x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch
+x86-x32-use-compat-shims-for-io_-setup-submit.patch
+genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch
+aio-fix-aio-request-leak-when-events-are-reaped-by-userspace.patch
+aio-fix-kernel-memory-disclosure-in-io_getevents-introduced-in-v3.10.patch
+cifs-fix-memory-leaks-in-smb2_open.patch
+btrfs-fix-double-free-in-find_lock_delalloc_range.patch
+btrfs-add-ctime-mtime-update-for-btrfs-device-add-remove.patch
+btrfs-output-warning-instead-of-error-when-loading-free-space-cache-failed.patch
+btrfs-make-sure-there-are-not-any-read-requests-before-stopping-workers.patch
+btrfs-fix-null-pointer-crash-of-deleting-a-seed-device.patch
+btrfs-mark-mapping-with-error-flag-to-report-errors-to-userspace.patch
+btrfs-set-right-total-device-count-for-seeding-support.patch
+btrfs-send-don-t-error-in-the-presence-of-subvols-snapshots.patch
+fs-btrfs-volumes.c-fix-for-possible-null-pointer-dereference.patch
+btrfs-don-t-check-nodes-for-extent-items.patch
+btrfs-use-right-type-to-get-real-comparison.patch
+btrfs-fix-scrub_print_warning-to-handle-skinny-metadata-extents.patch
+btrfs-fix-use-of-uninit-ret-in-end_extent_writepage.patch
+btrfs-fix-lockdep-warning-with-reclaim-lock-inversion.patch
+btrfs-allocate-raid-type-kobjects-dynamically.patch
diff --git a/queue-3.14/x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch b/queue-3.14/x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch
new file mode 100644 (file)
index 0000000..c6f4b04
--- /dev/null
@@ -0,0 +1,37 @@
+From 246f2d2ee1d715e1077fc47d61c394569c8ee692 Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@linux.intel.com>
+Date: Wed, 30 Apr 2014 14:03:25 -0700
+Subject: x86-32, espfix: Remove filter for espfix32 due to race
+
+From: "H. Peter Anvin" <hpa@linux.intel.com>
+
+commit 246f2d2ee1d715e1077fc47d61c394569c8ee692 upstream.
+
+It is not safe to use LAR to filter when to go down the espfix path,
+because the LDT is per-process (rather than per-thread) and another
+thread might change the descriptors behind our back.  Fortunately it
+is always *safe* (if a bit slow) to go down the espfix path, and a
+32-bit LDT stack segment is extremely rare.
+
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Link: http://lkml.kernel.org/r/1398816946-3351-1-git-send-email-hpa@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/entry_32.S |    5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -551,11 +551,6 @@ ENTRY(iret_exc)
+       CFI_RESTORE_STATE
+ ldt_ss:
+-      larl PT_OLDSS(%esp), %eax
+-      jnz restore_nocheck
+-      testl $0x00400000, %eax         # returning to 32bit stack?
+-      jnz restore_nocheck             # allright, normal return
+-
+ #ifdef CONFIG_PARAVIRT
+       /*
+        * The kernel can't run on a non-flat stack if paravirt mode
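
The race in the changelog hinges on the LDT being shared by every thread of a process, so a descriptor that passes the LAR check in one thread can be rewritten by a sibling before the fixup path runs. The x86-only userspace sketch below only demonstrates that sharing; the helper name and descriptor contents are illustrative and not taken from the patch.

/*
 * Sketch: the LDT is per-process, not per-thread (build with -pthread).
 */
#define _GNU_SOURCE
#include <asm/ldt.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static void *rewrite_ldt(void *arg)
{
        struct user_desc d;

        memset(&d, 0, sizeof(d));
        d.entry_number   = 0;
        d.limit          = 0xfffff;
        d.seg_32bit      = 1;
        d.limit_in_pages = 1;
        /* func 1 = write an LDT entry; visible to every thread in the process */
        if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) != 0)
                perror("modify_ldt(write)");
        return NULL;
}

int main(void)
{
        unsigned char ldt[16];
        pthread_t t;
        long n;

        pthread_create(&t, NULL, rewrite_ldt, NULL);
        pthread_join(t, NULL);

        /* func 0 = read the LDT; the entry installed by the other thread shows up */
        n = syscall(SYS_modify_ldt, 0, ldt, sizeof(ldt));
        printf("main thread reads %ld bytes of LDT modified by another thread\n", n);
        return 0;
}

With the filter removed, the kernel simply always takes the slower but safe espfix path for LDT stack segments.
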
diff --git a/queue-3.14/x86-x32-use-compat-shims-for-io_-setup-submit.patch b/queue-3.14/x86-x32-use-compat-shims-for-io_-setup-submit.patch
new file mode 100644 (file)
index 0000000..6d2e468
--- /dev/null
@@ -0,0 +1,57 @@
+From 7fd44dacdd803c0bbf38bf478d51d280902bb0f1 Mon Sep 17 00:00:00 2001
+From: Mike Frysinger <vapier@gentoo.org>
+Date: Sun, 4 May 2014 20:43:15 -0400
+Subject: x86, x32: Use compat shims for io_{setup,submit}
+
+From: Mike Frysinger <vapier@gentoo.org>
+
+commit 7fd44dacdd803c0bbf38bf478d51d280902bb0f1 upstream.
+
+The io_setup syscall takes a pointer to a context id of type aio_context_t.
+This in turn is typed to a __kernel_ulong_t.  We could tweak the
+exported headers to define this as a 64bit quantity for specific
+ABIs, but since we already have a 32bit compat shim for the x86 ABI,
+let's just re-use that logic.  The libaio package is also written to
+expect this as a pointer type, so a compat shim would simplify that.
+
+The io_submit syscall operates on an array of pointers to iocb structs.
+Padding out the array to be 64bit aligned is a huge pain, so convert
+it over to the existing compat shim too.
+
+We don't convert io_getevents to the compat func as its only purpose
+is to handle the timespec struct, and the x32 ABI uses 64bit times.
+
+With this change, the libaio package can now pass its testsuite when
+built for the x32 ABI.
+
+Signed-off-by: Mike Frysinger <vapier@gentoo.org>
+Link: http://lkml.kernel.org/r/1399250595-5005-1-git-send-email-vapier@gentoo.org
+Cc: H.J. Lu <hjl.tools@gmail.com>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/syscalls/syscall_64.tbl |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/syscalls/syscall_64.tbl
++++ b/arch/x86/syscalls/syscall_64.tbl
+@@ -212,10 +212,10 @@
+ 203   common  sched_setaffinity       sys_sched_setaffinity
+ 204   common  sched_getaffinity       sys_sched_getaffinity
+ 205   64      set_thread_area
+-206   common  io_setup                sys_io_setup
++206   64      io_setup                sys_io_setup
+ 207   common  io_destroy              sys_io_destroy
+ 208   common  io_getevents            sys_io_getevents
+-209   common  io_submit               sys_io_submit
++209   64      io_submit               sys_io_submit
+ 210   common  io_cancel               sys_io_cancel
+ 211   64      get_thread_area
+ 212   common  lookup_dcookie          sys_lookup_dcookie
+@@ -358,3 +358,5 @@
+ 540   x32     process_vm_writev       compat_sys_process_vm_writev
+ 541   x32     setsockopt              compat_sys_setsockopt
+ 542   x32     getsockopt              compat_sys_getsockopt
++543   x32     io_setup                compat_sys_io_setup
++544   x32     io_submit               compat_sys_io_submit
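
As the changelog notes, aio_context_t is a __kernel_ulong_t handle that libaio treats as pointer-sized, which is why x32 reuses the 32-bit compat entry points rather than widening the exported type. A minimal sketch of the raw syscall usage the shims keep working; nothing below is taken from the patch itself.

/*
 * Sketch: io_setup() fills in an opaque aio_context_t handle which
 * io_destroy() later takes back, independent of the userspace ABI.
 */
#include <linux/aio_abi.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        aio_context_t ctx = 0;   /* opaque handle filled in by the kernel */

        if (syscall(SYS_io_setup, 8, &ctx) != 0) {
                perror("io_setup");
                return 1;
        }
        printf("aio context handle: %#llx\n", (unsigned long long)ctx);

        if (syscall(SYS_io_destroy, ctx) != 0)
                perror("io_destroy");
        return 0;
}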