3.10-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 10 Oct 2013 23:30:19 +0000 (16:30 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 10 Oct 2013 23:30:19 +0000 (16:30 -0700)
added patches:
btrfs-change-how-we-queue-blocks-for-backref-checking.patch
btrfs-remove-ourselves-from-the-cluster-list-under-lock.patch
btrfs-skip-subvol-entries-when-checking-if-we-ve-created-a-dir-already.patch
s390-fix-system-call-restart-after-inferior-call.patch
tile-use-a-more-conservative-__my_cpu_offset-in-config_preempt.patch

queue-3.10/btrfs-change-how-we-queue-blocks-for-backref-checking.patch [new file with mode: 0644]
queue-3.10/btrfs-remove-ourselves-from-the-cluster-list-under-lock.patch [new file with mode: 0644]
queue-3.10/btrfs-skip-subvol-entries-when-checking-if-we-ve-created-a-dir-already.patch [new file with mode: 0644]
queue-3.10/s390-fix-system-call-restart-after-inferior-call.patch [new file with mode: 0644]
queue-3.10/series
queue-3.10/tile-use-a-more-conservative-__my_cpu_offset-in-config_preempt.patch [new file with mode: 0644]

diff --git a/queue-3.10/btrfs-change-how-we-queue-blocks-for-backref-checking.patch b/queue-3.10/btrfs-change-how-we-queue-blocks-for-backref-checking.patch
new file mode 100644 (file)
index 0000000..6a86234
--- /dev/null
@@ -0,0 +1,68 @@
+From b6c60c8018c4e9beb2f83fc82c09f9d033766571 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fusionio.com>
+Date: Tue, 30 Jul 2013 16:30:30 -0400
+Subject: Btrfs: change how we queue blocks for backref checking
+
+From: Josef Bacik <jbacik@fusionio.com>
+
+commit b6c60c8018c4e9beb2f83fc82c09f9d033766571 upstream.
+
+Previously we only added blocks to the list to have their backrefs checked if
+the level of the block was right above the one we were searching for.  This is
+because we want to avoid adding the entire path up to the root to the lists,
+so that we process things one at a time.  This assumes that if any blocks in
+the path to the root are not going to be checked (in other words, shared),
+then they will be at the level right above the current block and on up.  This
+isn't quite right, though, since we can have blocks higher up that are shared
+because they are attached to a reloc root.  In that case we won't add the
+block to be checked, and later on we will hit BUG_ON(!upper->checked).  So
+instead keep track of whether or not we've queued a block to be checked in the
+current search, and if we haven't, go ahead and queue it.  This patch fixes
+the panic I was seeing where we hit BUG_ON(!upper->checked).  Thanks,
+
+Signed-off-by: Josef Bacik <jbacik@fusionio.com>
+Signed-off-by: Chris Mason <chris.mason@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/relocation.c |   14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -691,6 +691,7 @@ struct backref_node *build_backref_tree(
+       int cowonly;
+       int ret;
+       int err = 0;
++      bool need_check = true;
+       path1 = btrfs_alloc_path();
+       path2 = btrfs_alloc_path();
+@@ -914,6 +915,7 @@ again:
+                       cur->bytenr);
+               lower = cur;
++              need_check = true;
+               for (; level < BTRFS_MAX_LEVEL; level++) {
+                       if (!path2->nodes[level]) {
+                               BUG_ON(btrfs_root_bytenr(&root->root_item) !=
+@@ -957,14 +959,12 @@ again:
+                               /*
+                                * add the block to pending list if we
+-                               * need check its backrefs. only block
+-                               * at 'cur->level + 1' is added to the
+-                               * tail of pending list. this guarantees
+-                               * we check backrefs from lower level
+-                               * blocks to upper level blocks.
++                               * need check its backrefs, we only do this once
++                               * while walking up a tree as we will catch
++                               * anything else later on.
+                                */
+-                              if (!upper->checked &&
+-                                  level == cur->level + 1) {
++                              if (!upper->checked && need_check) {
++                                      need_check = false;
+                                       list_add_tail(&edge->list[UPPER],
+                                                     &list);
+                               } else
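Stripped of the btrfs specifics, the change boils down to queueing at most the first unchecked ancestor seen during each upward walk, tracked with a one-shot flag; anything above it is picked up when that queued node is processed. A minimal standalone sketch of that pattern in plain C, using hypothetical node and pending-list types rather than the kernel's backref structures:

#include <stdbool.h>

struct node {
	struct node *parent;
	struct node *next_pending;
	bool checked;			/* backrefs already verified for this node */
};

/*
 * Walk from 'lower' toward the root and queue at most one unchecked
 * ancestor per walk, mirroring the need_check flag in the patch above.
 */
static void queue_one_unchecked(struct node *lower, struct node **pending)
{
	bool need_check = true;		/* reset for every new upward walk */

	for (struct node *upper = lower->parent; upper; upper = upper->parent) {
		if (!upper->checked && need_check) {
			need_check = false;
			upper->next_pending = *pending;	/* push onto pending list */
			*pending = upper;
		}
	}
}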
diff --git a/queue-3.10/btrfs-remove-ourselves-from-the-cluster-list-under-lock.patch b/queue-3.10/btrfs-remove-ourselves-from-the-cluster-list-under-lock.patch
new file mode 100644 (file)
index 0000000..c9eded8
--- /dev/null
@@ -0,0 +1,42 @@
+From b8d0c69b9469ffd33df30fee3e990f2d4aa68a09 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fusionio.com>
+Date: Thu, 22 Aug 2013 17:03:29 -0400
+Subject: Btrfs: remove ourselves from the cluster list under lock
+
+From: Josef Bacik <jbacik@fusionio.com>
+
+commit b8d0c69b9469ffd33df30fee3e990f2d4aa68a09 upstream.
+
+A user was reporting weird warnings from btrfs_put_delayed_ref(), and I noticed
+that we were doing the list_del_init() on our head ref outside of
+delayed_refs->lock.  This is a problem if there are still people on the list:
+we could end up modifying old pointers and such.  Fix this by removing ourselves
+from the list before we do our run_delayed_ref on our head ref.  Thanks,
+
+Signed-off-by: Josef Bacik <jbacik@fusionio.com>
+Signed-off-by: Chris Mason <chris.mason@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/extent-tree.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2402,6 +2402,8 @@ static noinline int run_clustered_refs(s
+                       default:
+                               WARN_ON(1);
+                       }
++              } else {
++                      list_del_init(&locked_ref->cluster);
+               }
+               spin_unlock(&delayed_refs->lock);
+@@ -2424,7 +2426,6 @@ static noinline int run_clustered_refs(s
+                * list before we release it.
+                */
+               if (btrfs_delayed_ref_is_head(ref)) {
+-                      list_del_init(&locked_ref->cluster);
+                       btrfs_delayed_ref_unlock(locked_ref);
+                       locked_ref = NULL;
+               }
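The underlying rule is generic: an element that other threads can still reach must be unlinked while the protecting lock is held, not after it is dropped. A small pthread-based sketch of that pattern (hypothetical list type, not the btrfs delayed-ref code):

#include <pthread.h>

struct entry {
	struct entry *prev, *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Unlink 'e' from its doubly linked list while the lock is still held,
 * so concurrent walkers never see half-updated neighbour pointers.
 */
static void unlink_entry(struct entry *e)
{
	pthread_mutex_lock(&list_lock);
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->next = e->prev = e;		/* re-initialise, like list_del_init() */
	pthread_mutex_unlock(&list_lock);
	/* only after the unlink is published under the lock is it safe to
	 * keep using 'e' privately or to free it */
}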
diff --git a/queue-3.10/btrfs-skip-subvol-entries-when-checking-if-we-ve-created-a-dir-already.patch b/queue-3.10/btrfs-skip-subvol-entries-when-checking-if-we-ve-created-a-dir-already.patch
new file mode 100644 (file)
index 0000000..65d488f
--- /dev/null
@@ -0,0 +1,39 @@
+From a05254143cd183b18002cbba7759a1e4629aa762 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fusionio.com>
+Date: Mon, 12 Aug 2013 10:56:14 -0400
+Subject: Btrfs: skip subvol entries when checking if we've created a dir already
+
+From: Josef Bacik <jbacik@fusionio.com>
+
+commit a05254143cd183b18002cbba7759a1e4629aa762 upstream.
+
+We have logic to see if we've already created a parent directory by checking
+whether an inode inside of that directory has a lower inode number than the one
+we are currently processing.  The logic is that if there is a lower inode number
+then we would have had to make sure the directory was created at that previous
+point.  The problem is that subvol inode numbers count from the lowest objectid
+in the root tree, which may be less than our current progress.  So just skip the
+entry if our dir item key is a root item.  This fixes the original test and the
+xfstest version I made that added an extra subvol create.  Thanks,
+
+Reported-by: Emil Karlson <jekarlson@gmail.com>
+Signed-off-by: Josef Bacik <jbacik@fusionio.com>
+Signed-off-by: Chris Mason <chris.mason@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/send.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -2524,7 +2524,8 @@ static int did_create_dir(struct send_ct
+               di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
+               btrfs_dir_item_key_to_cpu(eb, di, &di_key);
+-              if (di_key.objectid < sctx->send_progress) {
++              if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
++                  di_key.objectid < sctx->send_progress) {
+                       ret = 1;
+                       goto out;
+               }
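The check itself is a small filter: when scanning directory items to decide whether the directory was already created, entries that point at a subvolume root are ignored, because their object IDs are not comparable with the sender's inode-number progress. A rough standalone sketch (hypothetical key type and helper, not the send-path code):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

enum key_type { KEY_INODE_ITEM, KEY_ROOT_ITEM };	/* hypothetical */

struct dir_item_key {
	uint64_t objectid;
	enum key_type type;
};

/*
 * Return true if some already-processed child that is not a subvolume
 * entry proves the parent directory must have been created earlier.
 */
static bool dir_already_created(const struct dir_item_key *keys, size_t n,
				uint64_t send_progress)
{
	for (size_t i = 0; i < n; i++) {
		if (keys[i].type != KEY_ROOT_ITEM &&	/* skip subvol entries */
		    keys[i].objectid < send_progress)
			return true;
	}
	return false;
}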
diff --git a/queue-3.10/s390-fix-system-call-restart-after-inferior-call.patch b/queue-3.10/s390-fix-system-call-restart-after-inferior-call.patch
new file mode 100644 (file)
index 0000000..0075736
--- /dev/null
@@ -0,0 +1,43 @@
+From dbbfe487e5f3fc00c9fe5207d63309859704d12f Mon Sep 17 00:00:00 2001
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Fri, 27 Sep 2013 15:24:38 +0200
+Subject: s390: fix system call restart after inferior call
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+commit dbbfe487e5f3fc00c9fe5207d63309859704d12f upstream.
+
+Git commit 616498813b11ffef "s390: system call path micro optimization"
+introduced a regression in regard to system call restarting and inferior
+function calls via the ptrace interface. The pointer to the system call
+table needs to be loaded in sysc_sigpending if do_signal returns with
+TIF_SYSCALL set after it has restored a system call context.
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/entry.S   |    1 +
+ arch/s390/kernel/entry64.S |    1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -265,6 +265,7 @@ sysc_sigpending:
+       tm      __TI_flags+3(%r12),_TIF_SYSCALL
+       jno     sysc_return
+       lm      %r2,%r7,__PT_R2(%r11)   # load svc arguments
++      l       %r10,__TI_sysc_table(%r12)      # 31 bit system call table
+       xr      %r8,%r8                 # svc 0 returns -ENOSYS
+       clc     __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
+       jnl     sysc_nr_ok              # invalid svc number -> do svc 0
+--- a/arch/s390/kernel/entry64.S
++++ b/arch/s390/kernel/entry64.S
+@@ -293,6 +293,7 @@ sysc_sigpending:
+       tm      __TI_flags+7(%r12),_TIF_SYSCALL
+       jno     sysc_return
+       lmg     %r2,%r7,__PT_R2(%r11)   # load svc arguments
++      lg      %r10,__TI_sysc_table(%r12)      # address of system call table
+       lghi    %r8,0                   # svc 0 returns -ENOSYS
+       llgh    %r1,__PT_INT_CODE+2(%r11)       # load new svc number
+       cghi    %r1,NR_syscalls
diff --git a/queue-3.10/series b/queue-3.10/series
index 6e63f2ff88806c6cc6bedcfb5538c1c6d5dfbd31..adf0216cbc2b3f11d5801a9d6403f26585c265ff 100644 (file)
--- a/queue-3.10/series
@@ -90,3 +90,8 @@ xfs-fix-node-forward-in-xfs_node_toosmall.patch
 drm-nouveau-bios-init-stub-opcode-0xaa.patch
 irq-force-hardirq-exit-s-softirq-processing-on-its-own-stack.patch
 alsa-hda-fix-gpio-for-acer-aspire-3830tg.patch
+tile-use-a-more-conservative-__my_cpu_offset-in-config_preempt.patch
+s390-fix-system-call-restart-after-inferior-call.patch
+btrfs-change-how-we-queue-blocks-for-backref-checking.patch
+btrfs-skip-subvol-entries-when-checking-if-we-ve-created-a-dir-already.patch
+btrfs-remove-ourselves-from-the-cluster-list-under-lock.patch
diff --git a/queue-3.10/tile-use-a-more-conservative-__my_cpu_offset-in-config_preempt.patch b/queue-3.10/tile-use-a-more-conservative-__my_cpu_offset-in-config_preempt.patch
new file mode 100644 (file)
index 0000000..64a75e1
--- /dev/null
@@ -0,0 +1,71 @@
+From f862eefec0b68e099a9fa58d3761ffb10bad97e1 Mon Sep 17 00:00:00 2001
+From: Chris Metcalf <cmetcalf@tilera.com>
+Date: Thu, 26 Sep 2013 13:24:53 -0400
+Subject: tile: use a more conservative __my_cpu_offset in CONFIG_PREEMPT
+
+From: Chris Metcalf <cmetcalf@tilera.com>
+
+commit f862eefec0b68e099a9fa58d3761ffb10bad97e1 upstream.
+
+It turns out the kernel relies on barrier() to force a reload of the
+percpu offset value.  Since we can't easily modify the definition of
+barrier() to include "tp" as an output register, we instead provide a
+definition of __my_cpu_offset as extended assembly that includes a fake
+stack read to hazard against barrier(), forcing gcc to know that it
+must reread "tp" and recompute anything based on "tp" after a barrier.
+
+This fixes observed hangs in the slub allocator when we are looping
+on a percpu cmpxchg_double.
+
+A similar fix for ARMv7 was made in June in change 509eb76ebf97.
+
+Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/tile/include/asm/percpu.h |   34 +++++++++++++++++++++++++++++++---
+ 1 file changed, 31 insertions(+), 3 deletions(-)
+
+--- a/arch/tile/include/asm/percpu.h
++++ b/arch/tile/include/asm/percpu.h
+@@ -15,9 +15,37 @@
+ #ifndef _ASM_TILE_PERCPU_H
+ #define _ASM_TILE_PERCPU_H
+-register unsigned long __my_cpu_offset __asm__("tp");
+-#define __my_cpu_offset __my_cpu_offset
+-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
++register unsigned long my_cpu_offset_reg asm("tp");
++
++#ifdef CONFIG_PREEMPT
++/*
++ * For full preemption, we can't just use the register variable
++ * directly, since we need barrier() to hazard against it, causing the
++ * compiler to reload anything computed from a previous "tp" value.
++ * But we also don't want to use volatile asm, since we'd like the
++ * compiler to be able to cache the value across multiple percpu reads.
++ * So we use a fake stack read as a hazard against barrier().
++ * The 'U' constraint is like 'm' but disallows postincrement.
++ */
++static inline unsigned long __my_cpu_offset(void)
++{
++      unsigned long tp;
++      register unsigned long *sp asm("sp");
++      asm("move %0, tp" : "=r" (tp) : "U" (*sp));
++      return tp;
++}
++#define __my_cpu_offset __my_cpu_offset()
++#else
++/*
++ * We don't need to hazard against barrier() since "tp" doesn't ever
++ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
++ * changes at function call points, at which we are already re-reading
++ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
++ */
++#define __my_cpu_offset my_cpu_offset_reg
++#endif
++
++#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
+ #include <asm-generic/percpu.h>
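The trick generalises to any GCC extended asm: leaving the asm non-volatile lets the compiler cache its result across plain reads, while giving it a memory input means a "memory" clobber (what barrier() expands to) invalidates that cached result. A hypothetical x86-64 illustration of the same idea, independent of the tile patch and its "U" constraint:

/* barrier() as the kernel defines it: a compiler-only memory clobber. */
#define barrier() __asm__ __volatile__("" ::: "memory")

static unsigned long hazard_word;	/* fake dependency, never actually read */

static inline unsigned long read_value(void)
{
	unsigned long v;
	/*
	 * Non-volatile asm with an output: the compiler may cache the
	 * result across back-to-back calls.  The unused "m" input ties
	 * the asm to memory state, so any barrier() in between forces a
	 * re-execution - the same hazard the tile patch creates with its
	 * fake stack read.
	 */
	__asm__("movq %%rsp, %0" : "=r"(v) : "m"(hazard_word));
	return v;
}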