3.11-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 10 Oct 2013 23:28:07 +0000 (16:28 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 10 Oct 2013 23:28:07 +0000 (16:28 -0700)
added patches:
btrfs-change-how-we-queue-blocks-for-backref-checking.patch
btrfs-remove-ourselves-from-the-cluster-list-under-lock.patch
btrfs-reset-ret-in-record_one_backref.patch
btrfs-skip-subvol-entries-when-checking-if-we-ve-created-a-dir-already.patch
mmc-fix-null-pointer-use-in-mmc_blk_remove_req.patch
s390-fix-system-call-restart-after-inferior-call.patch
tile-use-a-more-conservative-__my_cpu_offset-in-config_preempt.patch

queue-3.11/btrfs-change-how-we-queue-blocks-for-backref-checking.patch [new file with mode: 0644]
queue-3.11/btrfs-remove-ourselves-from-the-cluster-list-under-lock.patch [new file with mode: 0644]
queue-3.11/btrfs-reset-ret-in-record_one_backref.patch [new file with mode: 0644]
queue-3.11/btrfs-skip-subvol-entries-when-checking-if-we-ve-created-a-dir-already.patch [new file with mode: 0644]
queue-3.11/mmc-fix-null-pointer-use-in-mmc_blk_remove_req.patch [new file with mode: 0644]
queue-3.11/s390-fix-system-call-restart-after-inferior-call.patch [new file with mode: 0644]
queue-3.11/series
queue-3.11/tile-use-a-more-conservative-__my_cpu_offset-in-config_preempt.patch [new file with mode: 0644]

diff --git a/queue-3.11/btrfs-change-how-we-queue-blocks-for-backref-checking.patch b/queue-3.11/btrfs-change-how-we-queue-blocks-for-backref-checking.patch
new file mode 100644 (file)
index 0000000..6a86234
--- /dev/null
@@ -0,0 +1,68 @@
+From b6c60c8018c4e9beb2f83fc82c09f9d033766571 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fusionio.com>
+Date: Tue, 30 Jul 2013 16:30:30 -0400
+Subject: Btrfs: change how we queue blocks for backref checking
+
+From: Josef Bacik <jbacik@fusionio.com>
+
+commit b6c60c8018c4e9beb2f83fc82c09f9d033766571 upstream.
+
+Previously we only added blocks to the list to have their backrefs checked if
+the level of the block was right above the one we were searching for.  This
+was to avoid adding the entire path up to the root to the lists, so that we
+process things one at a time.  It assumed that if any blocks in the path to
+the root were not going to be checked (shared, in other words), they would be
+at the level right above the current block or higher.  This isn't quite right,
+though, since blocks higher up the path can be shared because they are
+attached to a reloc root.  Such a block is never queued to be checked, and
+later on we hit BUG_ON(!upper->checked).  So instead keep track of whether or
+not we've queued a block to be checked in the current search, and if we
+haven't, go ahead and queue it.  This patch fixes the panic I was seeing
+where we hit BUG_ON(!upper->checked).  Thanks,
+
+Signed-off-by: Josef Bacik <jbacik@fusionio.com>
+Signed-off-by: Chris Mason <chris.mason@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/relocation.c |   14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -691,6 +691,7 @@ struct backref_node *build_backref_tree(
+       int cowonly;
+       int ret;
+       int err = 0;
++      bool need_check = true;
+       path1 = btrfs_alloc_path();
+       path2 = btrfs_alloc_path();
+@@ -914,6 +915,7 @@ again:
+                       cur->bytenr);
+               lower = cur;
++              need_check = true;
+               for (; level < BTRFS_MAX_LEVEL; level++) {
+                       if (!path2->nodes[level]) {
+                               BUG_ON(btrfs_root_bytenr(&root->root_item) !=
+@@ -957,14 +959,12 @@ again:
+                               /*
+                                * add the block to pending list if we
+-                               * need check its backrefs. only block
+-                               * at 'cur->level + 1' is added to the
+-                               * tail of pending list. this guarantees
+-                               * we check backrefs from lower level
+-                               * blocks to upper level blocks.
++                               * need check its backrefs, we only do this once
++                               * while walking up a tree as we will catch
++                               * anything else later on.
+                                */
+-                              if (!upper->checked &&
+-                                  level == cur->level + 1) {
++                              if (!upper->checked && need_check) {
++                                      need_check = false;
+                                       list_add_tail(&edge->list[UPPER],
+                                                     &list);
+                               } else
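
The queueing rule the fix adopts is easier to see outside the btrfs code.
Below is a minimal user-space sketch (all names are hypothetical, not btrfs
API): rather than queueing only the ancestor at cur->level + 1, each upward
walk queues the first unchecked ancestor it meets and relies on later walks
to catch the rest.

#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *parent;
	bool checked;
};

static void queue_for_check(struct node *n)
{
	printf("queued node %p\n", (void *)n);
}

static void walk_up(struct node *cur)
{
	bool need_check = true;			/* reset on every upward walk */
	struct node *n;

	for (n = cur->parent; n; n = n->parent) {
		if (!n->checked && need_check) {
			need_check = false;	/* queue at most one per walk */
			queue_for_check(n);
		}
		/* other unchecked ancestors are caught by later walks */
	}
}
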
diff --git a/queue-3.11/btrfs-remove-ourselves-from-the-cluster-list-under-lock.patch b/queue-3.11/btrfs-remove-ourselves-from-the-cluster-list-under-lock.patch
new file mode 100644 (file)
index 0000000..47c20db
--- /dev/null
@@ -0,0 +1,42 @@
+From b8d0c69b9469ffd33df30fee3e990f2d4aa68a09 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fusionio.com>
+Date: Thu, 22 Aug 2013 17:03:29 -0400
+Subject: Btrfs: remove ourselves from the cluster list under lock
+
+From: Josef Bacik <jbacik@fusionio.com>
+
+commit b8d0c69b9469ffd33df30fee3e990f2d4aa68a09 upstream.
+
+A user was reporting weird warnings from btrfs_put_delayed_ref(), and I
+noticed that we were doing this list_del_init() on our head ref outside of
+delayed_refs->lock.  This is a problem: if anybody is still on the list, we
+could end up modifying old pointers and such.  Fix this by removing ourselves
+from the list before we do our run_delayed_ref on our head ref.  Thanks,
+
+Signed-off-by: Josef Bacik <jbacik@fusionio.com>
+Signed-off-by: Chris Mason <chris.mason@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/extent-tree.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2403,6 +2403,8 @@ static noinline int run_clustered_refs(s
+                       default:
+                               WARN_ON(1);
+                       }
++              } else {
++                      list_del_init(&locked_ref->cluster);
+               }
+               spin_unlock(&delayed_refs->lock);
+@@ -2425,7 +2427,6 @@ static noinline int run_clustered_refs(s
+                * list before we release it.
+                */
+               if (btrfs_delayed_ref_is_head(ref)) {
+-                      list_del_init(&locked_ref->cluster);
+                       btrfs_delayed_ref_unlock(locked_ref);
+                       locked_ref = NULL;
+               }
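
The invariant being restored here is the usual one for shared lists: an entry
may only be unlinked while the lock protecting the list is held.  A hedged
user-space sketch with pthreads standing in for the kernel spinlock (names
hypothetical, not the btrfs API):

#include <pthread.h>

struct list_head {
	struct list_head *next, *prev;
};

static inline void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = entry;	/* leave entry self-linked */
}

static pthread_mutex_t refs_lock = PTHREAD_MUTEX_INITIALIZER;

static void finish_head_ref(struct list_head *cluster_entry)
{
	pthread_mutex_lock(&refs_lock);
	list_del_init(cluster_entry);	/* others may be walking the list */
	pthread_mutex_unlock(&refs_lock);

	/* the heavier run_delayed_ref-style work then runs unlocked */
}
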
diff --git a/queue-3.11/btrfs-reset-ret-in-record_one_backref.patch b/queue-3.11/btrfs-reset-ret-in-record_one_backref.patch
new file mode 100644 (file)
index 0000000..1c6a53f
--- /dev/null
@@ -0,0 +1,42 @@
+From 50f1319cb5f7690e4d9de18d1a75ea89296d0e53 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fusionio.com>
+Date: Mon, 22 Jul 2013 12:50:37 -0400
+Subject: Btrfs: reset ret in record_one_backref
+
+From: Josef Bacik <jbacik@fusionio.com>
+
+commit 50f1319cb5f7690e4d9de18d1a75ea89296d0e53 upstream.
+
+I was getting warnings from record_one_backref when running
+find ./ -type f -exec btrfs fi defrag -f {} \; because ret was set.  It turns
+out ret was set to 1 because the search slot didn't come out exact and we
+never reset it.  So reset it to 0 right after the search so we don't leak it
+and get unnecessary warnings.  Thanks,
+
+Signed-off-by: Josef Bacik <jbacik@fusionio.com>
+Signed-off-by: Chris Mason <chris.mason@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/inode.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2132,6 +2132,7 @@ static noinline int record_one_backref(u
+               WARN_ON(1);
+               return ret;
+       }
++      ret = 0;
+       while (1) {
+               cond_resched();
+@@ -2181,8 +2182,6 @@ static noinline int record_one_backref(u
+                   old->len || extent_offset + num_bytes <=
+                   old->extent_offset + old->offset)
+                       continue;
+-
+-              ret = 0;
+               break;
+       }
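
The underlying pattern is return-value hygiene around a search helper whose
positive return value is informational rather than an error.  A small sketch
under that assumption (hypothetical names, not the btrfs_search_slot() API):

/* 0 = exact hit, 1 = positioned just past the key, <0 = real error */
static int search_slot(int key)
{
	(void)key;
	return 1;
}

static int record_backref(int key)
{
	int ret = search_slot(key);

	if (ret < 0)
		return ret;	/* real errors still propagate */
	ret = 0;		/* an inexact hit is not an error from here on */

	/*
	 * Loop over candidate items; success paths can now break out
	 * without each having to remember to clear ret first.
	 */
	return ret;
}
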
diff --git a/queue-3.11/btrfs-skip-subvol-entries-when-checking-if-we-ve-created-a-dir-already.patch b/queue-3.11/btrfs-skip-subvol-entries-when-checking-if-we-ve-created-a-dir-already.patch
new file mode 100644 (file)
index 0000000..00dc7db
--- /dev/null
@@ -0,0 +1,39 @@
+From a05254143cd183b18002cbba7759a1e4629aa762 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fusionio.com>
+Date: Mon, 12 Aug 2013 10:56:14 -0400
+Subject: Btrfs: skip subvol entries when checking if we've created a dir already
+
+From: Josef Bacik <jbacik@fusionio.com>
+
+commit a05254143cd183b18002cbba7759a1e4629aa762 upstream.
+
+We have logic to see if we've already created a parent directory by checking
+whether an inode inside of that directory has a lower inode number than the
+one we are currently processing.  The logic is that if there is a lower inode
+number then we would have had to make sure the directory was created at that
+previous point.  The problem is that subvol inode numbers count from the
+lowest objectid in the root tree, which may be less than our current progress.
+So just skip the check if our dir item key is a root item.  This fixes the
+original test and the xfstest version I made that added an extra subvol
+create.  Thanks,
+
+Reported-by: Emil Karlson <jekarlson@gmail.com>
+Signed-off-by: Josef Bacik <jbacik@fusionio.com>
+Signed-off-by: Chris Mason <chris.mason@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/send.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -2519,7 +2519,8 @@ static int did_create_dir(struct send_ct
+               di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
+               btrfs_dir_item_key_to_cpu(eb, di, &di_key);
+-              if (di_key.objectid < sctx->send_progress) {
++              if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
++                  di_key.objectid < sctx->send_progress) {
+                       ret = 1;
+                       goto out;
+               }
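
A rough illustration of the check after the fix (hypothetical names; the key
type values are only illustrative):

#include <stdbool.h>

enum { DIR_ITEM_KEY = 84, ROOT_ITEM_KEY = 132 };

struct key {
	unsigned long long objectid;
	int type;
};

static bool proves_dir_created(const struct key *di_key,
			       unsigned long long send_progress)
{
	/*
	 * Subvolume entries draw their objectids from the root tree,
	 * so they do not order against send_progress and must not be
	 * used as evidence that the directory already exists.
	 */
	if (di_key->type == ROOT_ITEM_KEY)
		return false;
	return di_key->objectid < send_progress;
}
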
diff --git a/queue-3.11/mmc-fix-null-pointer-use-in-mmc_blk_remove_req.patch b/queue-3.11/mmc-fix-null-pointer-use-in-mmc_blk_remove_req.patch
new file mode 100644 (file)
index 0000000..b743789
--- /dev/null
@@ -0,0 +1,38 @@
+From 8efb83a2f8518a6ffcc074177f8d659c5165ef37 Mon Sep 17 00:00:00 2001
+From: Franck Jullien <franck.jullien@gmail.com>
+Date: Wed, 24 Jul 2013 15:17:48 +0200
+Subject: mmc: fix null pointer use in mmc_blk_remove_req
+
+From: Franck Jullien <franck.jullien@gmail.com>
+
+commit 8efb83a2f8518a6ffcc074177f8d659c5165ef37 upstream.
+
+A previous commit (fdfa20c1631210d0) reordered the shutdown sequence
+in mmc_blk_remove_req. However, mmc_cleanup_queue is now called before
+we get the card pointer, and mmc_cleanup_queue sets mq->card to NULL.
+
+This patch moves the card pointer assignment before mmc_cleanup_queue.
+
+Signed-off-by: Franck Jullien <franck.jullien@gmail.com>
+Signed-off-by: Chris Ball <cjb@laptop.org>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/card/block.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -2191,10 +2191,10 @@ static void mmc_blk_remove_req(struct mm
+                * is freeing the queue that stops new requests
+                * from being accepted.
+                */
++              card = md->queue.card;
+               mmc_cleanup_queue(&md->queue);
+               if (md->flags & MMC_BLK_PACKED_CMD)
+                       mmc_packed_clean(&md->queue);
+-              card = md->queue.card;
+               if (md->disk->flags & GENHD_FL_UP) {
+                       device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+                       if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
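
The shape of this bug is generic: a teardown routine clears a field that the
caller still needs.  A minimal sketch (hypothetical names, not the mmc API):

struct card;			/* opaque, only handled by pointer */

struct queue {
	struct card *card;
};

static void cleanup_queue(struct queue *q)
{
	q->card = NULL;		/* teardown clears the back-pointer */
}

static struct card *remove_req(struct queue *q)
{
	struct card *card = q->card;	/* read the pointer first... */

	cleanup_queue(q);		/* ...then run the teardown */
	return card;	/* non-NULL; the reversed order returned NULL */
}
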
diff --git a/queue-3.11/s390-fix-system-call-restart-after-inferior-call.patch b/queue-3.11/s390-fix-system-call-restart-after-inferior-call.patch
new file mode 100644 (file)
index 0000000..d020dc6
--- /dev/null
@@ -0,0 +1,43 @@
+From dbbfe487e5f3fc00c9fe5207d63309859704d12f Mon Sep 17 00:00:00 2001
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Fri, 27 Sep 2013 15:24:38 +0200
+Subject: s390: fix system call restart after inferior call
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+commit dbbfe487e5f3fc00c9fe5207d63309859704d12f upstream.
+
+Git commit 616498813b11ffef "s390: system call path micro optimization"
+introduced a regression with regard to system call restarting and inferior
+function calls via the ptrace interface.  The pointer to the system call
+table needs to be loaded in sysc_sigpending if do_signal returns with
+TIF_SYSCALL set after it has restored a system call context.
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/entry.S   |    1 +
+ arch/s390/kernel/entry64.S |    1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -265,6 +265,7 @@ sysc_sigpending:
+       tm      __TI_flags+3(%r12),_TIF_SYSCALL
+       jno     sysc_return
+       lm      %r2,%r7,__PT_R2(%r11)   # load svc arguments
++      l       %r10,__TI_sysc_table(%r12)      # 31 bit system call table
+       xr      %r8,%r8                 # svc 0 returns -ENOSYS
+       clc     __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
+       jnl     sysc_nr_ok              # invalid svc number -> do svc 0
+--- a/arch/s390/kernel/entry64.S
++++ b/arch/s390/kernel/entry64.S
+@@ -296,6 +296,7 @@ sysc_sigpending:
+       tm      __TI_flags+7(%r12),_TIF_SYSCALL
+       jno     sysc_return
+       lmg     %r2,%r7,__PT_R2(%r11)   # load svc arguments
++      lg      %r10,__TI_sysc_table(%r12)      # address of system call table
+       lghi    %r8,0                   # svc 0 returns -ENOSYS
+       llgh    %r1,__PT_INT_CODE+2(%r11)       # load new svc number
+       cghi    %r1,NR_syscalls
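
A C analogy of the control-flow bug may help (hypothetical names; the real
fix is the single load of %r10 added to each sysc_sigpending path):

typedef long (*syscall_fn)(void);

static long sys_noop(void) { return 0; }

/* stand-ins for the 31-bit and 64-bit system call tables */
static syscall_fn table31[] = { sys_noop };
static syscall_fn table64[] = { sys_noop };

static long dispatch_after_signal(int nr, int is_31bit)
{
	/*
	 * sysc_sigpending re-enters the dispatch code after do_signal
	 * may have restored an old system call context, so it must
	 * re-establish the table pointer that the normal entry path
	 * already holds in %r10:
	 */
	syscall_fn *table = is_31bit ? table31 : table64;

	return table[nr]();
}
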
diff --git a/queue-3.11/series b/queue-3.11/series
index ee0909d273fec34f674b0c6729a4d7e706ec3b16..ed6f5c103f967d644befdb479df303948c8dba5c 100644 (file)
@@ -111,3 +111,10 @@ arm-tegra-unify-tegra-s-kconfig-a-bit-more.patch
 alsa-hda-fix-gpio-for-acer-aspire-3830tg.patch
 arm-multi_v7_defconfig-enable-arm_atag_dtb_compat.patch
 hid-wiimote-fix-ff-deadlock.patch
+mmc-fix-null-pointer-use-in-mmc_blk_remove_req.patch
+tile-use-a-more-conservative-__my_cpu_offset-in-config_preempt.patch
+s390-fix-system-call-restart-after-inferior-call.patch
+btrfs-reset-ret-in-record_one_backref.patch
+btrfs-change-how-we-queue-blocks-for-backref-checking.patch
+btrfs-skip-subvol-entries-when-checking-if-we-ve-created-a-dir-already.patch
+btrfs-remove-ourselves-from-the-cluster-list-under-lock.patch
diff --git a/queue-3.11/tile-use-a-more-conservative-__my_cpu_offset-in-config_preempt.patch b/queue-3.11/tile-use-a-more-conservative-__my_cpu_offset-in-config_preempt.patch
new file mode 100644 (file)
index 0000000..64a75e1
--- /dev/null
@@ -0,0 +1,71 @@
+From f862eefec0b68e099a9fa58d3761ffb10bad97e1 Mon Sep 17 00:00:00 2001
+From: Chris Metcalf <cmetcalf@tilera.com>
+Date: Thu, 26 Sep 2013 13:24:53 -0400
+Subject: tile: use a more conservative __my_cpu_offset in CONFIG_PREEMPT
+
+From: Chris Metcalf <cmetcalf@tilera.com>
+
+commit f862eefec0b68e099a9fa58d3761ffb10bad97e1 upstream.
+
+It turns out the kernel relies on barrier() to force a reload of the
+percpu offset value.  Since we can't easily modify the definition of
+barrier() to include "tp" as an output register, we instead provide a
+definition of __my_cpu_offset as extended assembly that includes a fake
+stack read to hazard against barrier(), forcing gcc to know that it
+must reread "tp" and recompute anything based on "tp" after a barrier.
+
+This fixes observed hangs in the slub allocator when we are looping
+on a percpu cmpxchg_double.
+
+A similar fix for ARMv7 was made in June in change 509eb76ebf97.
+
+Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/tile/include/asm/percpu.h |   34 +++++++++++++++++++++++++++++++---
+ 1 file changed, 31 insertions(+), 3 deletions(-)
+
+--- a/arch/tile/include/asm/percpu.h
++++ b/arch/tile/include/asm/percpu.h
+@@ -15,9 +15,37 @@
+ #ifndef _ASM_TILE_PERCPU_H
+ #define _ASM_TILE_PERCPU_H
+-register unsigned long __my_cpu_offset __asm__("tp");
+-#define __my_cpu_offset __my_cpu_offset
+-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
++register unsigned long my_cpu_offset_reg asm("tp");
++
++#ifdef CONFIG_PREEMPT
++/*
++ * For full preemption, we can't just use the register variable
++ * directly, since we need barrier() to hazard against it, causing the
++ * compiler to reload anything computed from a previous "tp" value.
++ * But we also don't want to use volatile asm, since we'd like the
++ * compiler to be able to cache the value across multiple percpu reads.
++ * So we use a fake stack read as a hazard against barrier().
++ * The 'U' constraint is like 'm' but disallows postincrement.
++ */
++static inline unsigned long __my_cpu_offset(void)
++{
++      unsigned long tp;
++      register unsigned long *sp asm("sp");
++      asm("move %0, tp" : "=r" (tp) : "U" (*sp));
++      return tp;
++}
++#define __my_cpu_offset __my_cpu_offset()
++#else
++/*
++ * We don't need to hazard against barrier() since "tp" doesn't ever
++ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
++ * changes at function call points, at which we are already re-reading
++ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
++ */
++#define __my_cpu_offset my_cpu_offset_reg
++#endif
++
++#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
+ #include <asm-generic/percpu.h>
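
The hazard trick generalizes beyond tile.  A stand-alone GNU C sketch, with
percpu_base as a hypothetical stand-in for the "tp" register (on tile the asm
body is "move %0, tp" and the fake operand is the stack slot "U" (*sp)):

#define barrier() asm volatile("" ::: "memory")

static unsigned long percpu_base;	/* stands in for the tp register */

static inline unsigned long my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * A non-volatile asm with a memory input: barrier()'s memory
	 * clobber forces re-execution after a barrier, yet the compiler
	 * may still CSE repeated reads between barriers -- exactly the
	 * middle ground a volatile asm would forbid.
	 */
	asm("" : "=r" (off) : "0" (percpu_base), "m" (percpu_base));
	return off;
}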