git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.10-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 27 Jan 2016 06:53:27 +0000 (22:53 -0800)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 27 Jan 2016 06:53:27 +0000 (22:53 -0800)
added patches:
arm64-clear-out-any-singlestep-state-on-a-ptrace-detach-operation.patch
arm64-fix-building-without-config_uid16.patch
arm64-mm-ensure-that-the-zero-page-is-visible-to-the-page-table-walker.patch
hid-core-avoid-uninitialized-buffer-access.patch
parisc-iommu-fix-panic-due-to-trying-to-allocate-too-large-region.patch
powerpc-make-cmp-xchg-and-their-atomic_-versions-fully.patch
powerpc-make-value-returning-atomics-fully-ordered.patch
powerpc-tm-block-signal-return-setting-invalid-msr-state.patch
scripts-recordmcount.pl-support-data-in-text-section-on-powerpc.patch

queue-3.10/arm64-clear-out-any-singlestep-state-on-a-ptrace-detach-operation.patch [new file with mode: 0644]
queue-3.10/arm64-fix-building-without-config_uid16.patch [new file with mode: 0644]
queue-3.10/arm64-mm-ensure-that-the-zero-page-is-visible-to-the-page-table-walker.patch [new file with mode: 0644]
queue-3.10/hid-core-avoid-uninitialized-buffer-access.patch [new file with mode: 0644]
queue-3.10/parisc-iommu-fix-panic-due-to-trying-to-allocate-too-large-region.patch [new file with mode: 0644]
queue-3.10/powerpc-make-cmp-xchg-and-their-atomic_-versions-fully.patch [new file with mode: 0644]
queue-3.10/powerpc-make-value-returning-atomics-fully-ordered.patch [new file with mode: 0644]
queue-3.10/powerpc-tm-block-signal-return-setting-invalid-msr-state.patch [new file with mode: 0644]
queue-3.10/scripts-recordmcount.pl-support-data-in-text-section-on-powerpc.patch [new file with mode: 0644]
queue-3.10/series

diff --git a/queue-3.10/arm64-clear-out-any-singlestep-state-on-a-ptrace-detach-operation.patch b/queue-3.10/arm64-clear-out-any-singlestep-state-on-a-ptrace-detach-operation.patch
new file mode 100644 (file)
index 0000000..960cc63
--- /dev/null
@@ -0,0 +1,39 @@
+From 5db4fd8c52810bd9740c1240ebf89223b171aa70 Mon Sep 17 00:00:00 2001
+From: John Blackwood <john.blackwood@ccur.com>
+Date: Mon, 7 Dec 2015 11:50:34 +0000
+Subject: arm64: Clear out any singlestep state on a ptrace detach operation
+
+From: John Blackwood <john.blackwood@ccur.com>
+
+commit 5db4fd8c52810bd9740c1240ebf89223b171aa70 upstream.
+
+Make sure to clear out any ptrace singlestep state when a ptrace(2)
+PTRACE_DETACH call is made on arm64 systems.
+
+Otherwise, the previously ptraced task will die off with a SIGTRAP
+signal if the debugger just previously singlestepped the ptraced task.
+
+Signed-off-by: John Blackwood <john.blackwood@ccur.com>
+[will: added comment to justify why this is in the arch code]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/ptrace.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -51,6 +51,12 @@
+  */
+ void ptrace_disable(struct task_struct *child)
+ {
++      /*
++       * This would be better off in core code, but PTRACE_DETACH has
++       * grown its fair share of arch-specific worts and changing it
++       * is likely to cause regressions on obscure architectures.
++       */
++      user_disable_single_step(child);
+ }
+ /*
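
Note: for context, the failing scenario is the ordinary single-step-then-detach
sequence a debugger performs. A minimal userspace sketch (illustrative only,
not part of the patch; error handling elided):

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    /* Single-step a tracee, then detach.  Before this fix, the arm64
     * singlestep state survived PTRACE_DETACH, so the detached task
     * could take a fatal SIGTRAP on its next instruction. */
    static void step_then_detach(pid_t pid)
    {
            int status;

            ptrace(PTRACE_ATTACH, pid, NULL, NULL);
            waitpid(pid, &status, 0);

            ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);
            waitpid(pid, &status, 0);  /* stops with SIGTRAP after one insn */

            ptrace(PTRACE_DETACH, pid, NULL, NULL);
    }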
diff --git a/queue-3.10/arm64-fix-building-without-config_uid16.patch b/queue-3.10/arm64-fix-building-without-config_uid16.patch
new file mode 100644 (file)
index 0000000..2f6ef4e
--- /dev/null
@@ -0,0 +1,62 @@
+From fbc416ff86183e2203cdf975e2881d7c164b0271 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Fri, 20 Nov 2015 12:12:21 +0100
+Subject: arm64: fix building without CONFIG_UID16
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit fbc416ff86183e2203cdf975e2881d7c164b0271 upstream.
+
+As reported by Michal Simek, building an ARM64 kernel with CONFIG_UID16
+disabled currently fails because the system call table still needs to
+reference the individual function entry points that are provided by
+kernel/sys_ni.c in this case, and the declarations are hidden inside
+of #ifdef CONFIG_UID16:
+
+arch/arm64/include/asm/unistd32.h:57:8: error: 'sys_lchown16' undeclared here (not in a function)
+ __SYSCALL(__NR_lchown, sys_lchown16)
+
+I believe this problem only exists on ARM64, because older architectures
+tend to not need declarations when their system call table is built
+in assembly code, while newer architectures tend to not need UID16
+support. ARM64 only uses these system calls for compatibility with
+32-bit ARM binaries.
+
+This changes the CONFIG_UID16 check into CONFIG_HAVE_UID16, which is
+set unconditionally on ARM64 with CONFIG_COMPAT, so we see the
+declarations whenever we need them, but otherwise the behavior is
+unchanged.
+
+Fixes: af1839eb4bd4 ("Kconfig: clean up the long arch list for the UID16 config option")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/syscalls.h |    2 +-
+ include/linux/types.h    |    2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -498,7 +498,7 @@ asmlinkage long sys_chown(const char __u
+ asmlinkage long sys_lchown(const char __user *filename,
+                               uid_t user, gid_t group);
+ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
+-#ifdef CONFIG_UID16
++#ifdef CONFIG_HAVE_UID16
+ asmlinkage long sys_chown16(const char __user *filename,
+                               old_uid_t user, old_gid_t group);
+ asmlinkage long sys_lchown16(const char __user *filename,
+--- a/include/linux/types.h
++++ b/include/linux/types.h
+@@ -35,7 +35,7 @@ typedef __kernel_gid16_t        gid16_t;
+ typedef unsigned long         uintptr_t;
+-#ifdef CONFIG_UID16
++#ifdef CONFIG_HAVE_UID16
+ /* This is defined by include/asm-{arch}/posix_types.h */
+ typedef __kernel_old_uid_t    old_uid_t;
+ typedef __kernel_old_gid_t    old_gid_t;
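
Note: the build failure follows from plain C visibility rules: the compat
syscall table takes the address of sys_lchown16 and friends, so a declaration
must be in scope even when the implementation is a sys_ni.c stub. A reduced,
self-contained sketch of the pattern (hypothetical names, not the kernel
headers):

    /* Guarding the declaration on CONFIG_UID16 hides it when UID16=n,
     * even though the table below still needs the symbol's address;
     * guarding on CONFIG_HAVE_UID16 (set whenever this compat table is
     * built) keeps it visible. */
    #define CONFIG_HAVE_UID16 1

    #ifdef CONFIG_HAVE_UID16            /* was: #ifdef CONFIG_UID16 */
    long sys_lchown16(const char *filename,
                      unsigned short user, unsigned short group);
    #endif

    #define __SYSCALL(nr, sym) (void *)sym,
    void *compat_sys_call_table[] = {
            __SYSCALL(16, sys_lchown16) /* needs the declaration above */
    };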
diff --git a/queue-3.10/arm64-mm-ensure-that-the-zero-page-is-visible-to-the-page-table-walker.patch b/queue-3.10/arm64-mm-ensure-that-the-zero-page-is-visible-to-the-page-table-walker.patch
new file mode 100644 (file)
index 0000000..21063c4
--- /dev/null
@@ -0,0 +1,36 @@
+From 32d6397805d00573ce1fa55f408ce2bca15b0ad3 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 10 Dec 2015 16:05:36 +0000
+Subject: arm64: mm: ensure that the zero page is visible to the page table walker
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 32d6397805d00573ce1fa55f408ce2bca15b0ad3 upstream.
+
+In paging_init, we allocate the zero page, memset it to zero and then
+point TTBR0 to it in order to avoid speculative fetches through the
+identity mapping.
+
+In order to guarantee that the freshly zeroed page is indeed visible to
+the page table walker, we need to execute a dsb instruction prior to
+writing the TTBR.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/mmu.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -348,6 +348,9 @@ void __init paging_init(void)
+       empty_zero_page = virt_to_page(zero_page);
++      /* Ensure the zero page is visible to the page table walker */
++      dsb(ishst);
++
+       /*
+        * TTBR0 is only used for the identity mapping at this stage. Make it
+        * point to zero page to avoid speculatively fetching new entries.
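
Note: the requirement is the usual publish-then-point ordering. A sketch of
the sequence (illustrative; the function names mirror the arm64 code of this
era but are not quoted from it):

    void *zero_page = early_alloc(PAGE_SIZE);   /* allocate ...              */
    memset(zero_page, 0, PAGE_SIZE);            /* ... and zero the page     */

    dsb(ishst);                                 /* make those stores visible */
                                                /* to the page table walker  */

    cpu_set_reserved_ttbr0();                   /* only then let TTBR0 point */
                                                /* at the zero page          */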
diff --git a/queue-3.10/hid-core-avoid-uninitialized-buffer-access.patch b/queue-3.10/hid-core-avoid-uninitialized-buffer-access.patch
new file mode 100644 (file)
index 0000000..08e6545
--- /dev/null
@@ -0,0 +1,39 @@
+From 79b568b9d0c7c5d81932f4486d50b38efdd6da6d Mon Sep 17 00:00:00 2001
+From: Richard Purdie <richard.purdie@linuxfoundation.org>
+Date: Fri, 18 Sep 2015 16:31:33 -0700
+Subject: HID: core: Avoid uninitialized buffer access
+
+From: Richard Purdie <richard.purdie@linuxfoundation.org>
+
+commit 79b568b9d0c7c5d81932f4486d50b38efdd6da6d upstream.
+
+hid_connect adds various strings to the buffer but they're all
+conditional. You can find circumstances where nothing would be written
+to it but the kernel will still print the supposedly empty buffer with
+printk. This leads to corruption on the console/in the logs.
+
+Ensure buf is initialized to an empty string.
+
+Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
+[dvhart: Initialize string to "" rather than assign buf[0] = NULL;]
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: linux-input@vger.kernel.org
+Signed-off-by: Darren Hart <dvhart@linux.intel.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/hid-core.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1462,7 +1462,7 @@ int hid_connect(struct hid_device *hdev,
+               "Multi-Axis Controller"
+       };
+       const char *type, *bus;
+-      char buf[64];
++      char buf[64] = "";
+       unsigned int i;
+       int len;
+       int ret;
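
Note: the underlying pitfall is a stack buffer that is only conditionally
written but unconditionally printed. A standalone sketch in plain C (not the
driver code):

    #include <stdio.h>

    /* Only some branches write to buf; without the initializer the final
     * print emits stack garbage whenever none of them fire. */
    static void report(int has_multitouch)
    {
            char buf[64] = "";          /* the fix: a valid empty string */

            if (has_multitouch)
                    snprintf(buf, sizeof(buf), "multitouch");

            printf("input device%s\n", buf);
    }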
diff --git a/queue-3.10/parisc-iommu-fix-panic-due-to-trying-to-allocate-too-large-region.patch b/queue-3.10/parisc-iommu-fix-panic-due-to-trying-to-allocate-too-large-region.patch
new file mode 100644 (file)
index 0000000..e35fc25
--- /dev/null
@@ -0,0 +1,127 @@
+From e46e31a3696ae2d66f32c207df3969613726e636 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Mon, 30 Nov 2015 14:47:46 -0500
+Subject: parisc iommu: fix panic due to trying to allocate too large region
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit e46e31a3696ae2d66f32c207df3969613726e636 upstream.
+
+When using the Promise TX2+ SATA controller on PA-RISC, the system often
+crashes with a kernel panic; for example, just writing data with the dd
+utility will make it crash.
+
+Kernel panic - not syncing: drivers/parisc/sba_iommu.c: I/O MMU @ 000000000000a000 is out of mapping resources
+
+CPU: 0 PID: 18442 Comm: mkspadfs Not tainted 4.4.0-rc2 #2
+Backtrace:
+ [<000000004021497c>] show_stack+0x14/0x20
+ [<0000000040410bf0>] dump_stack+0x88/0x100
+ [<000000004023978c>] panic+0x124/0x360
+ [<0000000040452c18>] sba_alloc_range+0x698/0x6a0
+ [<0000000040453150>] sba_map_sg+0x260/0x5b8
+ [<000000000c18dbb4>] ata_qc_issue+0x264/0x4a8 [libata]
+ [<000000000c19535c>] ata_scsi_translate+0xe4/0x220 [libata]
+ [<000000000c19a93c>] ata_scsi_queuecmd+0xbc/0x320 [libata]
+ [<0000000040499bbc>] scsi_dispatch_cmd+0xfc/0x130
+ [<000000004049da34>] scsi_request_fn+0x6e4/0x970
+ [<00000000403e95a8>] __blk_run_queue+0x40/0x60
+ [<00000000403e9d8c>] blk_run_queue+0x3c/0x68
+ [<000000004049a534>] scsi_run_queue+0x2a4/0x360
+ [<000000004049be68>] scsi_end_request+0x1a8/0x238
+ [<000000004049de84>] scsi_io_completion+0xfc/0x688
+ [<0000000040493c74>] scsi_finish_command+0x17c/0x1d0
+
+The cause of the crash is not exhaustion of the IOMMU space; there are
+plenty of free pages. The function sba_alloc_range is called with size
+0x11000, thus the pages_needed variable is 0x11. The function
+sba_search_bitmap is called with bits_wanted 0x11 and boundary size is
+0x10 (because dma_get_seg_boundary(dev) returns 0xffff).
+
+The function sba_search_bitmap attempts to allocate 17 pages that must not
+cross 16-page boundary - it can't satisfy this requirement
+(iommu_is_span_boundary always returns true) and fails even if there are
+many free entries in the IOMMU space.
+
+How did it happen that we try to allocate 17 pages that don't cross
+16-page boundary? The cause is in the function iommu_coalesce_chunks. This
+function tries to coalesce adjacent entries in the scatterlist. The
+function does several checks if it may coalesce one entry with the next,
+one of those checks is this:
+
+       if (startsg->length + dma_len > max_seg_size)
+               break;
+
+When it finishes coalescing adjacent entries, it allocates the mapping:
+
+sg_dma_len(contig_sg) = dma_len;
+dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
+sg_dma_address(contig_sg) =
+       PIDE_FLAG
+       | (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
+       | dma_offset;
+
+It is possible that (startsg->length + dma_len > max_seg_size) is false
+(we are just near the 0x10000 max_seg_size boundary), so the function
+decides to coalesce this entry with the next entry. When the coalescing
+succeeds, the function performs
+       dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
+And now, because of non-zero dma_offset, dma_len is greater than 0x10000.
+iommu_alloc_range (a pointer to sba_alloc_range) is called and it attempts
+to allocate 17 pages for a device that must not cross 16-page boundary.
+
+To fix the bug, we must make sure that dma_len after addition of
+dma_offset and alignment doesn't cross the segment boundary. I.e. change
+       if (startsg->length + dma_len > max_seg_size)
+               break;
+to
+       if (ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) > max_seg_size)
+               break;
+
+This patch makes this change (it precalculates max_seg_boundary at the
+beginning of the function iommu_coalesce_chunks). I also added a check
+that the mapping length doesn't exceed dma_get_seg_boundary(dev) (it is
+not needed for Promise TX2+ SATA, but it may be needed for other devices
+that have dma_get_seg_boundary lower than dma_get_max_seg_size).
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/parisc/iommu-helpers.h |   15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/drivers/parisc/iommu-helpers.h
++++ b/drivers/parisc/iommu-helpers.h
+@@ -104,7 +104,11 @@ iommu_coalesce_chunks(struct ioc *ioc, s
+       struct scatterlist *contig_sg;     /* contig chunk head */
+       unsigned long dma_offset, dma_len; /* start/len of DMA stream */
+       unsigned int n_mappings = 0;
+-      unsigned int max_seg_size = dma_get_max_seg_size(dev);
++      unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
++                                      (unsigned)DMA_CHUNK_SIZE);
++      unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
++      if (max_seg_boundary)   /* check if the addition above didn't overflow */
++              max_seg_size = min(max_seg_size, max_seg_boundary);
+       while (nents > 0) {
+@@ -139,14 +143,11 @@ iommu_coalesce_chunks(struct ioc *ioc, s
+                       /*
+                       ** First make sure current dma stream won't
+-                      ** exceed DMA_CHUNK_SIZE if we coalesce the
++                      ** exceed max_seg_size if we coalesce the
+                       ** next entry.
+                       */   
+-                      if(unlikely(ALIGN(dma_len + dma_offset + startsg->length,
+-                                          IOVP_SIZE) > DMA_CHUNK_SIZE))
+-                              break;
+-
+-                      if (startsg->length + dma_len > max_seg_size)
++                      if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
++                                   max_seg_size))
+                               break;
+                       /*
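
Note: to make the arithmetic concrete, here is the worked instance from the
description above with the commit-message values plugged in (illustrative
constants):

    #define IOVP_SIZE   0x1000UL        /* 4k IOMMU page                    */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    unsigned long max_seg_size = 0x10000;   /* seg boundary 0xffff + 1      */
    unsigned long dma_len      = 0xf800;    /* coalesced so far             */
    unsigned long next_len     = 0x0800;    /* candidate scatterlist entry  */
    unsigned long dma_offset   = 0x0400;    /* non-zero start offset        */

    /* old test:  dma_len + next_len = 0x10000, not > 0x10000 -> coalesce;
     *            mapping = ALIGN(0x10000 + 0x400, 0x1000) = 0x11000,
     *            i.e. 17 pages, which can never fit a 16-page boundary.
     * new test:  ALIGN(0xf800 + 0x400 + 0x800, 0x1000) = 0x11000
     *            > max_seg_size -> break before coalescing.               */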
diff --git a/queue-3.10/powerpc-make-cmp-xchg-and-their-atomic_-versions-fully.patch b/queue-3.10/powerpc-make-cmp-xchg-and-their-atomic_-versions-fully.patch
new file mode 100644 (file)
index 0000000..953b7e1
--- /dev/null
@@ -0,0 +1,98 @@
+From 81d7a3294de7e9828310bbf986a67246b13fa01e Mon Sep 17 00:00:00 2001
+From: Boqun Feng <boqun.feng@gmail.com>
+Date: Mon, 2 Nov 2015 09:30:32 +0800
+Subject: powerpc: Make {cmp}xchg* and their atomic_ versions fully
+ ordered
+
+From: Boqun Feng <boqun.feng@gmail.com>
+
+commit 81d7a3294de7e9828310bbf986a67246b13fa01e upstream.
+
+According to memory-barriers.txt, xchg*, cmpxchg* and their atomic_
+versions all need to be fully ordered, however they are now just
+RELEASE+ACQUIRE, which are not fully ordered.
+
+So also replace PPC_RELEASE_BARRIER and PPC_ACQUIRE_BARRIER with
+PPC_ATOMIC_ENTRY_BARRIER and PPC_ATOMIC_EXIT_BARRIER in
+__{cmp,}xchg_{u32,u64} respectively to guarantee fully ordered semantics
+of atomic{,64}_{cmp,}xchg() and {cmp,}xchg(), as a complement of commit
+b97021f85517 ("powerpc: Fix atomic_xxx_return barrier semantics")
+
+This patch depends on patch "powerpc: Make value-returning atomics fully
+ordered" for PPC_ATOMIC_ENTRY_BARRIER definition.
+
+Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
+Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/cmpxchg.h |   16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/arch/powerpc/include/asm/cmpxchg.h
++++ b/arch/powerpc/include/asm/cmpxchg.h
+@@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned lo
+       unsigned long prev;
+       __asm__ __volatile__(
+-      PPC_RELEASE_BARRIER
++      PPC_ATOMIC_ENTRY_BARRIER
+ "1:   lwarx   %0,0,%2 \n"
+       PPC405_ERR77(0,%2)
+ "     stwcx.  %3,0,%2 \n\
+       bne-    1b"
+-      PPC_ACQUIRE_BARRIER
++      PPC_ATOMIC_EXIT_BARRIER
+       : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+       : "r" (p), "r" (val)
+       : "cc", "memory");
+@@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned lo
+       unsigned long prev;
+       __asm__ __volatile__(
+-      PPC_RELEASE_BARRIER
++      PPC_ATOMIC_ENTRY_BARRIER
+ "1:   ldarx   %0,0,%2 \n"
+       PPC405_ERR77(0,%2)
+ "     stdcx.  %3,0,%2 \n\
+       bne-    1b"
+-      PPC_ACQUIRE_BARRIER
++      PPC_ATOMIC_EXIT_BARRIER
+       : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+       : "r" (p), "r" (val)
+       : "cc", "memory");
+@@ -152,14 +152,14 @@ __cmpxchg_u32(volatile unsigned int *p,
+       unsigned int prev;
+       __asm__ __volatile__ (
+-      PPC_RELEASE_BARRIER
++      PPC_ATOMIC_ENTRY_BARRIER
+ "1:   lwarx   %0,0,%2         # __cmpxchg_u32\n\
+       cmpw    0,%0,%3\n\
+       bne-    2f\n"
+       PPC405_ERR77(0,%2)
+ "     stwcx.  %4,0,%2\n\
+       bne-    1b"
+-      PPC_ACQUIRE_BARRIER
++      PPC_ATOMIC_EXIT_BARRIER
+       "\n\
+ 2:"
+       : "=&r" (prev), "+m" (*p)
+@@ -198,13 +198,13 @@ __cmpxchg_u64(volatile unsigned long *p,
+       unsigned long prev;
+       __asm__ __volatile__ (
+-      PPC_RELEASE_BARRIER
++      PPC_ATOMIC_ENTRY_BARRIER
+ "1:   ldarx   %0,0,%2         # __cmpxchg_u64\n\
+       cmpd    0,%0,%3\n\
+       bne-    2f\n\
+       stdcx.  %4,0,%2\n\
+       bne-    1b"
+-      PPC_ACQUIRE_BARRIER
++      PPC_ATOMIC_EXIT_BARRIER
+       "\n\
+ 2:"
+       : "=&r" (prev), "+m" (*p)
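
Note: neither a release nor an acquire barrier orders a store before the
primitive against a load after it, which is why RELEASE+ACQUIRE falls short
of full ordering. The same distinction exists in C11 atomics, which gives a
compact way to see it (illustrative, not kernel code):

    #include <stdatomic.h>

    atomic_int x, y, s;
    int r0, r1;

    /* Store-buffering shape around an acq_rel exchange.  With
     * memory_order_acq_rel, the outcome r0 == 0 && r1 == 0 is allowed;
     * it is forbidden once the exchange is fully ordered in the kernel
     * sense (an smp_mb() on each side), which is what this patch
     * guarantees for the ppc {cmp,}xchg family. */
    void thread0(void)
    {
            atomic_store_explicit(&x, 1, memory_order_relaxed);
            atomic_exchange_explicit(&s, 1, memory_order_acq_rel);
            r0 = atomic_load_explicit(&y, memory_order_relaxed);
    }

    void thread1(void)
    {
            atomic_store_explicit(&y, 1, memory_order_relaxed);
            atomic_exchange_explicit(&s, 1, memory_order_acq_rel);
            r1 = atomic_load_explicit(&x, memory_order_relaxed);
    }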
diff --git a/queue-3.10/powerpc-make-value-returning-atomics-fully-ordered.patch b/queue-3.10/powerpc-make-value-returning-atomics-fully-ordered.patch
new file mode 100644 (file)
index 0000000..d6e8ec4
--- /dev/null
@@ -0,0 +1,52 @@
+From 49e9cf3f0c04bf76ffa59242254110309554861d Mon Sep 17 00:00:00 2001
+From: Boqun Feng <boqun.feng@gmail.com>
+Date: Mon, 2 Nov 2015 09:30:31 +0800
+Subject: powerpc: Make value-returning atomics fully ordered
+
+From: Boqun Feng <boqun.feng@gmail.com>
+
+commit 49e9cf3f0c04bf76ffa59242254110309554861d upstream.
+
+According to memory-barriers.txt:
+
+> Any atomic operation that modifies some state in memory and returns
+> information about the state (old or new) implies an SMP-conditional
+> general memory barrier (smp_mb()) on each side of the actual
+> operation ...
+
+Which means these operations should be fully ordered. However on PPC,
+PPC_ATOMIC_ENTRY_BARRIER is the barrier before the actual operation,
+which is currently "lwsync" if SMP=y. The leading "lwsync" cannot
+guarantee fully ordered atomics, according to Paul McKenney:
+
+https://lkml.org/lkml/2015/10/14/970
+
+To fix this, we define PPC_ATOMIC_ENTRY_BARRIER as "sync" to guarantee
+the fully-ordered semantics.
+
+This also makes futex atomics fully ordered, which can avoid possible
+memory ordering problems if userspace code relies on futex system call
+for fully ordered semantics.
+
+Fixes: b97021f85517 ("powerpc: Fix atomic_xxx_return barrier semantics")
+Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
+Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/synch.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/include/asm/synch.h
++++ b/arch/powerpc/include/asm/synch.h
+@@ -44,7 +44,7 @@ static inline void isync(void)
+       MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
+ #define PPC_ACQUIRE_BARRIER    "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
+ #define PPC_RELEASE_BARRIER    stringify_in_c(LWSYNC) "\n"
+-#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
++#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
+ #define PPC_ATOMIC_EXIT_BARRIER        "\n" stringify_in_c(sync) "\n"
+ #else
+ #define PPC_ACQUIRE_BARRIER
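
Note: in instruction terms the change swaps the leading lwsync, which does not
order an earlier store against a later load, for a full sync. A sketch of the
resulting shape of a value-returning atomic (inline asm modeled on
arch/powerpc/include/asm/atomic.h, not quoted from it):

    static inline int atomic_add_return_sketch(int a, int *v)
    {
            int t;

            __asm__ __volatile__(
            "       sync            \n" /* entry barrier: was lwsync      */
            "1:     lwarx   %0,0,%2 \n" /* load-reserve the counter       */
            "       add     %0,%1,%0\n"
            "       stwcx.  %0,0,%2 \n" /* store-conditional; retry if    */
            "       bne-    1b      \n" /* the reservation was lost       */
            "       sync            \n" /* exit barrier: already sync     */
            : "=&r" (t)
            : "r" (a), "r" (v)
            : "cc", "memory");

            return t;
    }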
diff --git a/queue-3.10/powerpc-tm-block-signal-return-setting-invalid-msr-state.patch b/queue-3.10/powerpc-tm-block-signal-return-setting-invalid-msr-state.patch
new file mode 100644 (file)
index 0000000..4216659
--- /dev/null
@@ -0,0 +1,82 @@
+From d2b9d2a5ad5ef04ff978c9923d19730cb05efd55 Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Thu, 19 Nov 2015 15:44:44 +1100
+Subject: powerpc/tm: Block signal return setting invalid MSR state
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit d2b9d2a5ad5ef04ff978c9923d19730cb05efd55 upstream.
+
+Currently we allow both the MSR T and S bits to be set by userspace on
+a signal return.  Unfortunately this is a reserved configuration and
+will cause a TM Bad Thing exception if attempted (via rfid).
+
+This patch checks for this case in both the 32 and 64 bit signals
+code.  If both T and S are set, we mark the context as invalid.
+
+Found using a syscall fuzzer.
+
+Fixes: 2b0a576d15e0 ("powerpc: Add new transactional memory state to the signal context")
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/reg.h  |    1 +
+ arch/powerpc/kernel/signal_32.c |   14 +++++++++-----
+ arch/powerpc/kernel/signal_64.c |    4 ++++
+ 3 files changed, 14 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -108,6 +108,7 @@
+ #define MSR_TS_T      __MASK(MSR_TS_T_LG)     /*  Transaction Transactional */
+ #define MSR_TS_MASK   (MSR_TS_T | MSR_TS_S)   /* Transaction State bits */
+ #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
++#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
+ #define MSR_TM_TRANSACTIONAL(x)       (((x) & MSR_TS_MASK) == MSR_TS_T)
+ #define MSR_TM_SUSPENDED(x)   (((x) & MSR_TS_MASK) == MSR_TS_S)
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -858,6 +858,15 @@ static long restore_tm_user_regs(struct
+               return 1;
+ #endif /* CONFIG_SPE */
++      /* Get the top half of the MSR from the user context */
++      if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
++              return 1;
++      msr_hi <<= 32;
++      /* If TM bits are set to the reserved value, it's an invalid context */
++      if (MSR_TM_RESV(msr_hi))
++              return 1;
++      /* Pull in the MSR TM bits from the user context */
++      regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
+       /* Now, recheckpoint.  This loads up all of the checkpointed (older)
+        * registers, including FP and V[S]Rs.  After recheckpointing, the
+        * transactional versions should be loaded.
+@@ -867,11 +876,6 @@ static long restore_tm_user_regs(struct
+       current->thread.tm_texasr |= TEXASR_FS;
+       /* This loads the checkpointed FP/VEC state, if used */
+       tm_recheckpoint(&current->thread, msr);
+-      /* Get the top half of the MSR */
+-      if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
+-              return 1;
+-      /* Pull in MSR TM from user context */
+-      regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
+       /* This loads the speculative FP/VEC state, if used */
+       if (msr & MSR_FP) {
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -416,6 +416,10 @@ static long restore_tm_sigcontexts(struc
+       /* get MSR separately, transfer the LE bit if doing signal return */
+       err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
++      /* Don't allow reserved mode. */
++      if (MSR_TM_RESV(msr))
++              return -EINVAL;
++
+       /* pull in MSR TM from user context */
+       regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
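
Note: the new MSR_TM_RESV() test is a plain mask compare: of the four
encodings of the two transaction-state bits, only both-set is reserved. A
standalone illustration (bit positions as used for 64-bit Book3S; a sketch,
not the kernel header):

    #define MSR_TS_S        (1UL << 33)     /* transaction suspended      */
    #define MSR_TS_T        (1UL << 34)     /* transaction transactional  */
    #define MSR_TS_MASK     (MSR_TS_T | MSR_TS_S)
    #define MSR_TM_RESV(x)  (((x) & MSR_TS_MASK) == MSR_TS_MASK)

    /* MSR_TM_RESV(0)                   -> 0  no transaction: fine
     * MSR_TM_RESV(MSR_TS_S)            -> 0  suspended:      fine
     * MSR_TM_RESV(MSR_TS_T)            -> 0  transactional:  fine
     * MSR_TM_RESV(MSR_TS_T | MSR_TS_S) -> 1  reserved: sigreturn now
     *    rejects this instead of letting rfid raise a TM Bad Thing.    */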
diff --git a/queue-3.10/scripts-recordmcount.pl-support-data-in-text-section-on-powerpc.patch b/queue-3.10/scripts-recordmcount.pl-support-data-in-text-section-on-powerpc.patch
new file mode 100644 (file)
index 0000000..1efde7d
--- /dev/null
@@ -0,0 +1,44 @@
+From 2e50c4bef77511b42cc226865d6bc568fa7f8769 Mon Sep 17 00:00:00 2001
+From: Ulrich Weigand <ulrich.weigand@de.ibm.com>
+Date: Tue, 12 Jan 2016 23:14:22 +1100
+Subject: scripts/recordmcount.pl: support data in text section on powerpc
+
+From: Ulrich Weigand <ulrich.weigand@de.ibm.com>
+
+commit 2e50c4bef77511b42cc226865d6bc568fa7f8769 upstream.
+
+If a text section starts out with a data blob before the first
+function start label, the disassembly parsing done in recordmcount.pl
+gets confused on powerpc, leading to creation of corrupted module
+objects.
+
+This was not a problem so far since the compiler would never create
+such text sections.  However, this has changed with a recent change
+in GCC 6 to support distances of > 2GB between a function and its
+associated TOC in the ELFv2 ABI, exposing this problem.
+
+There is already code in recordmcount.pl to handle such data blobs
+on the sparc64 platform.  This patch uses the same method to handle
+those on powerpc as well.
+
+Acked-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Ulrich Weigand <ulrich.weigand@de.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ scripts/recordmcount.pl |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/scripts/recordmcount.pl
++++ b/scripts/recordmcount.pl
+@@ -265,7 +265,8 @@ if ($arch eq "x86_64") {
+ } elsif ($arch eq "powerpc") {
+     $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
+-    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
++    # See comment in the sparc64 section for why we use '\w'.
++    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?\\w*?)>:";
+     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";
+     if ($bits == 64) {
diff --git a/queue-3.10/series b/queue-3.10/series
index e5a813f18247600025a0a4117fa26bc570182265..114112f0b327222e97e582adad942f024e43f61b 100644 (file)
@@ -40,3 +40,12 @@ phonet-properly-unshare-skbs-in-phonet_rcv.patch
 isdn_ppp-add-checks-for-allocation-failure-in-isdn_ppp_open.patch
 ppp-slip-validate-vj-compression-slot-parameters-completely.patch
 team-replace-rcu_read_lock-with-a-mutex-in-team_vlan_rx_kill_vid.patch
+powerpc-tm-block-signal-return-setting-invalid-msr-state.patch
+powerpc-make-value-returning-atomics-fully-ordered.patch
+powerpc-make-cmp-xchg-and-their-atomic_-versions-fully.patch
+scripts-recordmcount.pl-support-data-in-text-section-on-powerpc.patch
+arm64-fix-building-without-config_uid16.patch
+arm64-clear-out-any-singlestep-state-on-a-ptrace-detach-operation.patch
+arm64-mm-ensure-that-the-zero-page-is-visible-to-the-page-table-walker.patch
+parisc-iommu-fix-panic-due-to-trying-to-allocate-too-large-region.patch
+hid-core-avoid-uninitialized-buffer-access.patch