3.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 16 Feb 2014 18:45:16 +0000 (10:45 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 16 Feb 2014 18:45:16 +0000 (10:45 -0800)
added patches:
arm64-add-dsb-after-icache-flush-in-__flush_icache_all.patch
arm64-invalidate-the-tlb-when-replacing-pmd-entries-during-boot.patch
arm64-vdso-fix-coarse-clock-handling.patch
arm64-vdso-prevent-ld-from-aligning-pt_load-segments-to-64k.patch
arm64-vdso-update-wtm-fields-for-clock_monotonic_coarse.patch
btrfs-disable-snapshot-aware-defrag-for-now.patch
crypto-s390-fix-concurrency-issue-in-aes-ctr-mode.patch
crypto-s390-fix-des-and-des3_ede-cbc-concurrency-issue.patch
crypto-s390-fix-des-and-des3_ede-ctr-concurrency-issue.patch
irqchip-armada-370-xp-fix-ipi-race-condition.patch

queue-3.10/arm64-add-dsb-after-icache-flush-in-__flush_icache_all.patch [new file with mode: 0644]
queue-3.10/arm64-invalidate-the-tlb-when-replacing-pmd-entries-during-boot.patch [new file with mode: 0644]
queue-3.10/arm64-vdso-fix-coarse-clock-handling.patch [new file with mode: 0644]
queue-3.10/arm64-vdso-prevent-ld-from-aligning-pt_load-segments-to-64k.patch [new file with mode: 0644]
queue-3.10/arm64-vdso-update-wtm-fields-for-clock_monotonic_coarse.patch [new file with mode: 0644]
queue-3.10/btrfs-disable-snapshot-aware-defrag-for-now.patch [new file with mode: 0644]
queue-3.10/crypto-s390-fix-concurrency-issue-in-aes-ctr-mode.patch [new file with mode: 0644]
queue-3.10/crypto-s390-fix-des-and-des3_ede-cbc-concurrency-issue.patch [new file with mode: 0644]
queue-3.10/crypto-s390-fix-des-and-des3_ede-ctr-concurrency-issue.patch [new file with mode: 0644]
queue-3.10/irqchip-armada-370-xp-fix-ipi-race-condition.patch [new file with mode: 0644]
queue-3.10/series

diff --git a/queue-3.10/arm64-add-dsb-after-icache-flush-in-__flush_icache_all.patch b/queue-3.10/arm64-add-dsb-after-icache-flush-in-__flush_icache_all.patch
new file mode 100644
index 0000000..a3eea8e
--- /dev/null
@@ -0,0 +1,33 @@
+From 5044bad43ee573d0b6d90e3ccb7a40c2c7d25eb4 Mon Sep 17 00:00:00 2001
+From: Vinayak Kale <vkale@apm.com>
+Date: Wed, 5 Feb 2014 09:34:36 +0000
+Subject: arm64: add DSB after icache flush in __flush_icache_all()
+
+From: Vinayak Kale <vkale@apm.com>
+
+commit 5044bad43ee573d0b6d90e3ccb7a40c2c7d25eb4 upstream.
+
+Add DSB after icache flush to complete the cache maintenance operation.
+The function __flush_icache_all() is used only for user space mappings
+and an ISB is not required because of an exception return before executing
+user instructions. An exception return would behave like an ISB.
+
+Signed-off-by: Vinayak Kale <vkale@apm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/cacheflush.h |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/include/asm/cacheflush.h
++++ b/arch/arm64/include/asm/cacheflush.h
+@@ -116,6 +116,7 @@ extern void flush_dcache_page(struct pag
+ static inline void __flush_icache_all(void)
+ {
+       asm("ic ialluis");
++      dsb();
+ }
+ #define flush_dcache_mmap_lock(mapping) \
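
A sketch of the patched function with the ordering spelled out (illustrative only; dsb() here is the 3.10-era arm64 macro, a full-system data synchronization barrier):

    static inline void __flush_icache_all(void)
    {
            asm("ic ialluis");  /* invalidate entire icache to PoU, Inner Shareable */
            dsb();              /* wait here until that invalidation has completed  */
    }

No ISB is added because the function only covers user space mappings: the exception return back to userspace performs the context synchronization an ISB would otherwise provide.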
diff --git a/queue-3.10/arm64-invalidate-the-tlb-when-replacing-pmd-entries-during-boot.patch b/queue-3.10/arm64-invalidate-the-tlb-when-replacing-pmd-entries-during-boot.patch
new file mode 100644
index 0000000..f0cf608
--- /dev/null
@@ -0,0 +1,49 @@
+From a55f9929a9b257f84b6cc7b2397379cabd744a22 Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Tue, 4 Feb 2014 16:01:31 +0000
+Subject: arm64: Invalidate the TLB when replacing pmd entries during boot
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit a55f9929a9b257f84b6cc7b2397379cabd744a22 upstream.
+
+With the 64K page size configuration, __create_page_tables in head.S
+maps enough memory to get started but using 64K pages rather than 512M
+sections with a single pgd/pud/pmd entry pointing to a pte table.
+create_mapping() may override the pgd/pud/pmd table entry with a block
+(section) one if the RAM size is more than 512MB and aligned correctly.
+For the end of this block to be accessible, the old TLB entry must be
+invalidated.
+
+Reported-by: Mark Salter <msalter@redhat.com>
+Tested-by: Mark Salter <msalter@redhat.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/mmu.c |   12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -203,10 +203,18 @@ static void __init alloc_init_pmd(pud_t
+       do {
+               next = pmd_addr_end(addr, end);
+               /* try section mapping first */
+-              if (((addr | next | phys) & ~SECTION_MASK) == 0)
++              if (((addr | next | phys) & ~SECTION_MASK) == 0) {
++                      pmd_t old_pmd =*pmd;
+                       set_pmd(pmd, __pmd(phys | prot_sect_kernel));
+-              else
++                      /*
++                       * Check for previous table entries created during
++                       * boot (__create_page_tables) and flush them.
++                       */
++                      if (!pmd_none(old_pmd))
++                              flush_tlb_all();
++              } else {
+                       alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
++              }
+               phys += next - addr;
+       } while (pmd++, addr = next, addr != end);
+ }
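
The pattern the fix applies, isolated from the mapping loop (a sketch reusing the patch's names, not a standalone build): snapshot the entry before overwriting it, and flush only when something was already mapped there.

    pmd_t old_pmd = *pmd;                          /* entry left by __create_page_tables? */
    set_pmd(pmd, __pmd(phys | prot_sect_kernel));  /* replace it with a section mapping   */
    if (!pmd_none(old_pmd))                        /* a boot-time table entry was live,   */
            flush_tlb_all();                       /* so drop its cached translations     */

Without the flush, the TLB can keep serving the old 64K-page translation, and the end of the new 512M section beyond it stays inaccessible.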
diff --git a/queue-3.10/arm64-vdso-fix-coarse-clock-handling.patch b/queue-3.10/arm64-vdso-fix-coarse-clock-handling.patch
new file mode 100644
index 0000000..ce50a14
--- /dev/null
@@ -0,0 +1,64 @@
+From 069b918623e1510e58dacf178905a72c3baa3ae4 Mon Sep 17 00:00:00 2001
+From: Nathan Lynch <nathan_lynch@mentor.com>
+Date: Wed, 5 Feb 2014 05:53:04 +0000
+Subject: arm64: vdso: fix coarse clock handling
+
+From: Nathan Lynch <nathan_lynch@mentor.com>
+
+commit 069b918623e1510e58dacf178905a72c3baa3ae4 upstream.
+
+When __kernel_clock_gettime is called with a CLOCK_MONOTONIC_COARSE or
+CLOCK_REALTIME_COARSE clock id, it returns incorrectly to whatever the
+caller has placed in x2 ("ret x2" to return from the fast path).  Fix
+this by saving x30/LR to x2 only in code that will call
+__do_get_tspec, restoring x30 afterward, and using a plain "ret" to
+return from the routine.
+
+Also: while the resulting tv_nsec value for CLOCK_REALTIME and
+CLOCK_MONOTONIC must be computed using intermediate values that are
+left-shifted by cs_shift (x12, set by __do_get_tspec), the results for
+coarse clocks should be calculated using unshifted values
+(xtime_coarse_nsec is in units of actual nanoseconds).  The current
+code shifts intermediate values by x12 unconditionally, but x12 is
+uninitialized when servicing a coarse clock.  Fix this by setting x12
+to 0 once we know we are dealing with a coarse clock id.
+
+Signed-off-by: Nathan Lynch <nathan_lynch@mentor.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/vdso/gettimeofday.S |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/vdso/gettimeofday.S
++++ b/arch/arm64/kernel/vdso/gettimeofday.S
+@@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime)
+       bl      __do_get_tspec
+       seqcnt_check w9, 1b
++      mov     x30, x2
++
+       cmp     w0, #CLOCK_MONOTONIC
+       b.ne    6f
+@@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime)
+       ccmp    w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
+       b.ne    8f
++      /* xtime_coarse_nsec is already right-shifted */
++      mov     x12, #0
++
+       /* Get coarse timespec. */
+       adr     vdso_data, _vdso_data
+ 3:    seqcnt_acquire
+@@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime)
+       lsr     x11, x11, x12
+       stp     x10, x11, [x1, #TSPEC_TV_SEC]
+       mov     x0, xzr
+-      ret     x2
++      ret
+ 7:
+       mov     x30, x2
+ 8:    /* Syscall fallback. */
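
Both problems are easier to see in C than in the assembly (a model only: the field names mirror the vdso data page, and delta_cycles stands for the counter delta that __do_get_tspec computes):

    /* precise clocks: intermediates are scaled by cs_mult, so the  */
    /* result must be shifted right by cs_shift (x12) at the end    */
    nsec = (xtime_clock_nsec + delta_cycles * cs_mult) >> cs_shift;

    /* coarse clocks: xtime_coarse_nsec holds plain nanoseconds, so */
    /* the shift amount must be forced to 0 before the common exit  */
    nsec = xtime_coarse_nsec >> 0;

The return bug is independent: "ret x2" branches to whatever x2 holds, and only the paths that call __do_get_tspec ever saved the link register there, so the fix restores x30 from x2 right after the helper returns and exits through a plain "ret".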
diff --git a/queue-3.10/arm64-vdso-prevent-ld-from-aligning-pt_load-segments-to-64k.patch b/queue-3.10/arm64-vdso-prevent-ld-from-aligning-pt_load-segments-to-64k.patch
new file mode 100644
index 0000000..cd3988f
--- /dev/null
@@ -0,0 +1,41 @@
+From 40507403485fcb56b83d6ddfc954e9b08305054c Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 4 Feb 2014 14:41:26 +0000
+Subject: arm64: vdso: prevent ld from aligning PT_LOAD segments to 64k
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 40507403485fcb56b83d6ddfc954e9b08305054c upstream.
+
+Whilst the text segment for our VDSO is marked as PT_LOAD in the ELF
+headers, it is mapped by the kernel and not actually subject to
+demand-paging. ld doesn't realise this, and emits a p_align field of 64k
+(the maximum supported page size), which conflicts with the load address
+picked by the kernel on 4k systems, which will be 4k aligned. This
+causes GDB to fail with "Failed to read a valid object file image from
+memory" when attempting to load the VDSO.
+
+This patch passes the -n option to ld, which prevents it from aligning
+PT_LOAD segments to the maximum page size.
+
+Reported-by: Kyle McMartin <kyle@redhat.com>
+Acked-by: Kyle McMartin <kyle@redhat.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/vdso/Makefile |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/vdso/Makefile
++++ b/arch/arm64/kernel/vdso/Makefile
+@@ -48,7 +48,7 @@ $(obj-vdso): %.o: %.S
+ # Actual build commands
+ quiet_cmd_vdsold = VDSOL $@
+-      cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
++      cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
+ quiet_cmd_vdsoas = VDSOA $@
+       cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
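
An easy way to observe the effect (a general ELF check, not part of the patch): readelf -lW on the built vdso.so prints the program headers, and with -n the Align value of the PT_LOAD entry should no longer read 0x10000, so the 4k-aligned load address picked by the kernel no longer contradicts the header GDB reads.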
diff --git a/queue-3.10/arm64-vdso-update-wtm-fields-for-clock_monotonic_coarse.patch b/queue-3.10/arm64-vdso-update-wtm-fields-for-clock_monotonic_coarse.patch
new file mode 100644
index 0000000..b12a3f8
--- /dev/null
@@ -0,0 +1,42 @@
+From d4022a335271a48cce49df35d825897914fbffe3 Mon Sep 17 00:00:00 2001
+From: Nathan Lynch <nathan_lynch@mentor.com>
+Date: Mon, 3 Feb 2014 19:48:52 +0000
+Subject: arm64: vdso: update wtm fields for CLOCK_MONOTONIC_COARSE
+
+From: Nathan Lynch <nathan_lynch@mentor.com>
+
+commit d4022a335271a48cce49df35d825897914fbffe3 upstream.
+
+Update wall-to-monotonic fields in the VDSO data page
+unconditionally.  These are used to service CLOCK_MONOTONIC_COARSE,
+which is not guarded by use_syscall.
+
+Signed-off-by: Nathan Lynch <nathan_lynch@mentor.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/vdso.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kernel/vdso.c
++++ b/arch/arm64/kernel/vdso.c
+@@ -235,6 +235,8 @@ void update_vsyscall(struct timekeeper *
+       vdso_data->use_syscall                  = use_syscall;
+       vdso_data->xtime_coarse_sec             = xtime_coarse.tv_sec;
+       vdso_data->xtime_coarse_nsec            = xtime_coarse.tv_nsec;
++      vdso_data->wtm_clock_sec                = tk->wall_to_monotonic.tv_sec;
++      vdso_data->wtm_clock_nsec               = tk->wall_to_monotonic.tv_nsec;
+       if (!use_syscall) {
+               vdso_data->cs_cycle_last        = tk->clock->cycle_last;
+@@ -242,8 +244,6 @@ void update_vsyscall(struct timekeeper *
+               vdso_data->xtime_clock_nsec     = tk->xtime_nsec;
+               vdso_data->cs_mult              = tk->mult;
+               vdso_data->cs_shift             = tk->shift;
+-              vdso_data->wtm_clock_sec        = tk->wall_to_monotonic.tv_sec;
+-              vdso_data->wtm_clock_nsec       = tk->wall_to_monotonic.tv_nsec;
+       }
+       smp_wmb();
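
What the coarse monotonic reader consumes, as a C model (field names follow the patch; the real consumer is the vdso assembly). Because this path runs even when use_syscall is set, both wtm fields have to be refreshed outside the guarded block:

    ts.tv_sec  = vdso_data->xtime_coarse_sec  + vdso_data->wtm_clock_sec;
    ts.tv_nsec = vdso_data->xtime_coarse_nsec + vdso_data->wtm_clock_nsec;
    /* tv_nsec is then normalized into the range [0, NSEC_PER_SEC) */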
diff --git a/queue-3.10/btrfs-disable-snapshot-aware-defrag-for-now.patch b/queue-3.10/btrfs-disable-snapshot-aware-defrag-for-now.patch
new file mode 100644
index 0000000..c263493
--- /dev/null
@@ -0,0 +1,31 @@
+From 8101c8dbf6243ba517aab58d69bf1bc37d8b7b9c Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fb.com>
+Date: Wed, 29 Jan 2014 16:05:30 -0500
+Subject: Btrfs: disable snapshot aware defrag for now
+
+From: Josef Bacik <jbacik@fb.com>
+
+commit 8101c8dbf6243ba517aab58d69bf1bc37d8b7b9c upstream.
+
+It's just broken and it's taking a lot of effort to fix it, so for now just
+disable it so people can defrag in peace.  Thanks,
+
+Signed-off-by: Josef Bacik <jbacik@fb.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/inode.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2655,7 +2655,7 @@ static int btrfs_finish_ordered_io(struc
+                       EXTENT_DEFRAG, 1, cached_state);
+       if (ret) {
+               u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
+-              if (last_snapshot >= BTRFS_I(inode)->generation)
++              if (0 && last_snapshot >= BTRFS_I(inode)->generation)
+                       /* the inode is shared */
+                       new = record_old_file_extents(inode, ordered_extent);
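
The one-character disable leans on a common kernel idiom: "0 &&" makes the branch statically dead so the compiler drops the call, yet the disabled code still compiles and type-checks, which keeps it bitrot-free and trivial to re-enable. In isolation (hypothetical names):

    if (0 && extent_is_shared(inode))    /* always false, but still compiled */
            defrag_snapshot_aware(inode);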
diff --git a/queue-3.10/crypto-s390-fix-concurrency-issue-in-aes-ctr-mode.patch b/queue-3.10/crypto-s390-fix-concurrency-issue-in-aes-ctr-mode.patch
new file mode 100644
index 0000000..3f77238
--- /dev/null
@@ -0,0 +1,145 @@
+From 0519e9ad89e5cd6e6b08398f57c6a71d9580564c Mon Sep 17 00:00:00 2001
+From: Harald Freudenberger <freude@linux.vnet.ibm.com>
+Date: Thu, 16 Jan 2014 16:01:11 +0100
+Subject: crypto: s390 - fix concurrency issue in aes-ctr mode
+
+From: Harald Freudenberger <freude@linux.vnet.ibm.com>
+
+commit 0519e9ad89e5cd6e6b08398f57c6a71d9580564c upstream.
+
+The aes-ctr mode uses one preallocated page without any concurrency
+protection. When multiple threads run aes-ctr encryption or decryption
+this can lead to data corruption.
+
+The patch introduces locking for the page and a fallback solution with
+slower en/decryption performance in concurrency situations.
+
+Signed-off-by: Harald Freudenberger <freude@linux.vnet.ibm.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/crypto/aes_s390.c |   65 +++++++++++++++++++++++++++++++-------------
+ 1 file changed, 46 insertions(+), 19 deletions(-)
+
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -25,6 +25,7 @@
+ #include <linux/err.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
++#include <linux/spinlock.h>
+ #include "crypt_s390.h"
+ #define AES_KEYLEN_128                1
+@@ -32,6 +33,7 @@
+ #define AES_KEYLEN_256                4
+ static u8 *ctrblk;
++static DEFINE_SPINLOCK(ctrblk_lock);
+ static char keylen_flag;
+ struct s390_aes_ctx {
+@@ -756,43 +758,67 @@ static int ctr_aes_set_key(struct crypto
+       return aes_set_key(tfm, in_key, key_len);
+ }
++static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
++{
++      unsigned int i, n;
++
++      /* only use complete blocks, max. PAGE_SIZE */
++      n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
++      for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
++              memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
++                     AES_BLOCK_SIZE);
++              crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
++      }
++      return n;
++}
++
+ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
+                        struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
+ {
+       int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
+-      unsigned int i, n, nbytes;
+-      u8 buf[AES_BLOCK_SIZE];
+-      u8 *out, *in;
++      unsigned int n, nbytes;
++      u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
++      u8 *out, *in, *ctrptr = ctrbuf;
+       if (!walk->nbytes)
+               return ret;
+-      memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
++      if (spin_trylock(&ctrblk_lock))
++              ctrptr = ctrblk;
++
++      memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
+       while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+               out = walk->dst.virt.addr;
+               in = walk->src.virt.addr;
+               while (nbytes >= AES_BLOCK_SIZE) {
+-                      /* only use complete blocks, max. PAGE_SIZE */
+-                      n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
+-                                               nbytes & ~(AES_BLOCK_SIZE - 1);
+-                      for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+-                              memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
+-                                     AES_BLOCK_SIZE);
+-                              crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
+-                      }
+-                      ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
+-                      if (ret < 0 || ret != n)
++                      if (ctrptr == ctrblk)
++                              n = __ctrblk_init(ctrptr, nbytes);
++                      else
++                              n = AES_BLOCK_SIZE;
++                      ret = crypt_s390_kmctr(func, sctx->key, out, in,
++                                             n, ctrptr);
++                      if (ret < 0 || ret != n) {
++                              if (ctrptr == ctrblk)
++                                      spin_unlock(&ctrblk_lock);
+                               return -EIO;
++                      }
+                       if (n > AES_BLOCK_SIZE)
+-                              memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
++                              memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
+                                      AES_BLOCK_SIZE);
+-                      crypto_inc(ctrblk, AES_BLOCK_SIZE);
++                      crypto_inc(ctrptr, AES_BLOCK_SIZE);
+                       out += n;
+                       in += n;
+                       nbytes -= n;
+               }
+               ret = blkcipher_walk_done(desc, walk, nbytes);
+       }
++      if (ctrptr == ctrblk) {
++              if (nbytes)
++                      memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
++              else
++                      memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
++              spin_unlock(&ctrblk_lock);
++      }
+       /*
+        * final block may be < AES_BLOCK_SIZE, copy only nbytes
+        */
+@@ -800,14 +826,15 @@ static int ctr_aes_crypt(struct blkciphe
+               out = walk->dst.virt.addr;
+               in = walk->src.virt.addr;
+               ret = crypt_s390_kmctr(func, sctx->key, buf, in,
+-                                     AES_BLOCK_SIZE, ctrblk);
++                                     AES_BLOCK_SIZE, ctrbuf);
+               if (ret < 0 || ret != AES_BLOCK_SIZE)
+                       return -EIO;
+               memcpy(out, buf, nbytes);
+-              crypto_inc(ctrblk, AES_BLOCK_SIZE);
++              crypto_inc(ctrbuf, AES_BLOCK_SIZE);
+               ret = blkcipher_walk_done(desc, walk, 0);
++              memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
+       }
+-      memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
++
+       return ret;
+ }
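
The locking scheme reduces to a trylock with a private fallback (a sketch of the shape, not the driver code): the winner of the spinlock gets the preallocated page and the fast multi-block path, while everyone else keeps going through an on-stack single-block counter at lower throughput instead of sleeping.

    u8 ctrbuf[AES_BLOCK_SIZE];
    u8 *ctrptr = ctrbuf;                 /* default: private fallback buffer */

    if (spin_trylock(&ctrblk_lock))      /* opportunistic, never blocks      */
            ctrptr = ctrblk;             /* won the shared PAGE_SIZE buffer  */

    /* ... feed the hardware n bytes per call: up to PAGE_SIZE when  */
    /* ctrptr == ctrblk, exactly AES_BLOCK_SIZE otherwise ...        */

    if (ctrptr == ctrblk)
            spin_unlock(&ctrblk_lock);   /* only the lock owner unlocks */

Using spin_trylock rather than spin_lock keeps the uncontended case fast and lets concurrent requests proceed in parallel instead of serializing on one page.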
diff --git a/queue-3.10/crypto-s390-fix-des-and-des3_ede-cbc-concurrency-issue.patch b/queue-3.10/crypto-s390-fix-des-and-des3_ede-cbc-concurrency-issue.patch
new file mode 100644
index 0000000..a1b87cd
--- /dev/null
@@ -0,0 +1,115 @@
+From adc3fcf1552b6e406d172fd9690bbd1395053d13 Mon Sep 17 00:00:00 2001
+From: Harald Freudenberger <freude@linux.vnet.ibm.com>
+Date: Wed, 22 Jan 2014 13:00:04 +0100
+Subject: crypto: s390 - fix des and des3_ede cbc concurrency issue
+
+From: Harald Freudenberger <freude@linux.vnet.ibm.com>
+
+commit adc3fcf1552b6e406d172fd9690bbd1395053d13 upstream.
+
+In s390 des and des3_ede cbc mode the iv value is not protected
+against concurrent access and modification from another running
+en/decrypt operation which is using the very same tfm struct
+instance. This fix copies the iv to the local stack before
+the crypto operation and stores the value back when done.
+
+Signed-off-by: Harald Freudenberger <freude@linux.vnet.ibm.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/crypto/des_s390.c |   26 ++++++++++++++------------
+ 1 file changed, 14 insertions(+), 12 deletions(-)
+
+--- a/arch/s390/crypto/des_s390.c
++++ b/arch/s390/crypto/des_s390.c
+@@ -105,29 +105,35 @@ static int ecb_desall_crypt(struct blkci
+ }
+ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
+-                          u8 *iv, struct blkcipher_walk *walk)
++                          struct blkcipher_walk *walk)
+ {
++      struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       int ret = blkcipher_walk_virt(desc, walk);
+       unsigned int nbytes = walk->nbytes;
++      struct {
++              u8 iv[DES_BLOCK_SIZE];
++              u8 key[DES3_KEY_SIZE];
++      } param;
+       if (!nbytes)
+               goto out;
+-      memcpy(iv, walk->iv, DES_BLOCK_SIZE);
++      memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
++      memcpy(param.key, ctx->key, DES3_KEY_SIZE);
+       do {
+               /* only use complete blocks */
+               unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
+               u8 *out = walk->dst.virt.addr;
+               u8 *in = walk->src.virt.addr;
+-              ret = crypt_s390_kmc(func, iv, out, in, n);
++              ret = crypt_s390_kmc(func, &param, out, in, n);
+               if (ret < 0 || ret != n)
+                       return -EIO;
+               nbytes &= DES_BLOCK_SIZE - 1;
+               ret = blkcipher_walk_done(desc, walk, nbytes);
+       } while ((nbytes = walk->nbytes));
+-      memcpy(walk->iv, iv, DES_BLOCK_SIZE);
++      memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
+ out:
+       return ret;
+@@ -179,22 +185,20 @@ static int cbc_des_encrypt(struct blkcip
+                          struct scatterlist *dst, struct scatterlist *src,
+                          unsigned int nbytes)
+ {
+-      struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+-      return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
++      return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
+ }
+ static int cbc_des_decrypt(struct blkcipher_desc *desc,
+                          struct scatterlist *dst, struct scatterlist *src,
+                          unsigned int nbytes)
+ {
+-      struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+-      return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
++      return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
+ }
+ static struct crypto_alg cbc_des_alg = {
+@@ -327,22 +331,20 @@ static int cbc_des3_encrypt(struct blkci
+                           struct scatterlist *dst, struct scatterlist *src,
+                           unsigned int nbytes)
+ {
+-      struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+-      return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
++      return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
+ }
+ static int cbc_des3_decrypt(struct blkcipher_desc *desc,
+                           struct scatterlist *dst, struct scatterlist *src,
+                           unsigned int nbytes)
+ {
+-      struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+-      return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
++      return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
+ }
+ static struct crypto_alg cbc_des3_alg = {
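
The shape of the fix (sizes as in the patch): the chaining state moves from the shared tfm context onto each caller's stack, packed next to the key in the layout the KMC operation takes as its parameter block, so concurrent requests on the same tfm can no longer trample each other's IV.

    struct {
            u8 iv[DES_BLOCK_SIZE];    /* per-request chaining value  */
            u8 key[DES3_KEY_SIZE];    /* copied from the tfm context */
    } param;

    memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
    memcpy(param.key, ctx->key, DES3_KEY_SIZE);
    /* crypt_s390_kmc(func, &param, out, in, n) chains through param.iv, */
    /* which is copied back to walk->iv once all blocks are processed    */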
diff --git a/queue-3.10/crypto-s390-fix-des-and-des3_ede-ctr-concurrency-issue.patch b/queue-3.10/crypto-s390-fix-des-and-des3_ede-ctr-concurrency-issue.patch
new file mode 100644
index 0000000..6e95146
--- /dev/null
@@ -0,0 +1,137 @@
+From ee97dc7db4cbda33e4241c2d85b42d1835bc8a35 Mon Sep 17 00:00:00 2001
+From: Harald Freudenberger <freude@linux.vnet.ibm.com>
+Date: Wed, 22 Jan 2014 13:01:33 +0100
+Subject: crypto: s390 - fix des and des3_ede ctr concurrency issue
+
+From: Harald Freudenberger <freude@linux.vnet.ibm.com>
+
+commit ee97dc7db4cbda33e4241c2d85b42d1835bc8a35 upstream.
+
+In s390 des and 3des ctr mode there is one preallocated page
+used to speed up the en/decryption. This page is not protected
+against concurrent usage and thus there is a potential of data
+corruption with multiple threads.
+
+The fix introduces locking/unlocking the ctr page and a slower
+fallback solution in concurrent situations.
+
+Signed-off-by: Harald Freudenberger <freude@linux.vnet.ibm.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/crypto/des_s390.c |   69 ++++++++++++++++++++++++++++++--------------
+ 1 file changed, 48 insertions(+), 21 deletions(-)
+
+--- a/arch/s390/crypto/des_s390.c
++++ b/arch/s390/crypto/des_s390.c
+@@ -25,6 +25,7 @@
+ #define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
+ static u8 *ctrblk;
++static DEFINE_SPINLOCK(ctrblk_lock);
+ struct s390_des_ctx {
+       u8 iv[DES_BLOCK_SIZE];
+@@ -368,54 +369,80 @@ static struct crypto_alg cbc_des3_alg =
+       }
+ };
++static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
++{
++      unsigned int i, n;
++
++      /* align to block size, max. PAGE_SIZE */
++      n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
++      for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
++              memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
++              crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
++      }
++      return n;
++}
++
+ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
+-                          struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
++                          struct s390_des_ctx *ctx,
++                          struct blkcipher_walk *walk)
+ {
+       int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
+-      unsigned int i, n, nbytes;
+-      u8 buf[DES_BLOCK_SIZE];
+-      u8 *out, *in;
++      unsigned int n, nbytes;
++      u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
++      u8 *out, *in, *ctrptr = ctrbuf;
++
++      if (!walk->nbytes)
++              return ret;
+-      memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
++      if (spin_trylock(&ctrblk_lock))
++              ctrptr = ctrblk;
++
++      memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
+       while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
+               out = walk->dst.virt.addr;
+               in = walk->src.virt.addr;
+               while (nbytes >= DES_BLOCK_SIZE) {
+-                      /* align to block size, max. PAGE_SIZE */
+-                      n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
+-                              nbytes & ~(DES_BLOCK_SIZE - 1);
+-                      for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+-                              memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
+-                                     DES_BLOCK_SIZE);
+-                              crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
+-                      }
+-                      ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
+-                      if (ret < 0 || ret != n)
++                      if (ctrptr == ctrblk)
++                              n = __ctrblk_init(ctrptr, nbytes);
++                      else
++                              n = DES_BLOCK_SIZE;
++                      ret = crypt_s390_kmctr(func, ctx->key, out, in,
++                                             n, ctrptr);
++                      if (ret < 0 || ret != n) {
++                              if (ctrptr == ctrblk)
++                                      spin_unlock(&ctrblk_lock);
+                               return -EIO;
++                      }
+                       if (n > DES_BLOCK_SIZE)
+-                              memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
++                              memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
+                                      DES_BLOCK_SIZE);
+-                      crypto_inc(ctrblk, DES_BLOCK_SIZE);
++                      crypto_inc(ctrptr, DES_BLOCK_SIZE);
+                       out += n;
+                       in += n;
+                       nbytes -= n;
+               }
+               ret = blkcipher_walk_done(desc, walk, nbytes);
+       }
+-
++      if (ctrptr == ctrblk) {
++              if (nbytes)
++                      memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
++              else
++                      memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
++              spin_unlock(&ctrblk_lock);
++      }
+       /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
+       if (nbytes) {
+               out = walk->dst.virt.addr;
+               in = walk->src.virt.addr;
+               ret = crypt_s390_kmctr(func, ctx->key, buf, in,
+-                                     DES_BLOCK_SIZE, ctrblk);
++                                     DES_BLOCK_SIZE, ctrbuf);
+               if (ret < 0 || ret != DES_BLOCK_SIZE)
+                       return -EIO;
+               memcpy(out, buf, nbytes);
+-              crypto_inc(ctrblk, DES_BLOCK_SIZE);
++              crypto_inc(ctrbuf, DES_BLOCK_SIZE);
+               ret = blkcipher_walk_done(desc, walk, 0);
++              memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
+       }
+-      memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
+       return ret;
+ }
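
The helper both s390 ctr patches add, annotated as a generic routine (renamed here for illustration; it assumes the caller has already placed the initial counter value in the first block of buf, and relies on crypto_inc treating the block as a big-endian counter, which the kernel version does):

    static unsigned int ctrblk_fill(u8 *buf, unsigned int nbytes,
                                    unsigned int blksize)
    {
            unsigned int i, n;

            /* whole blocks only, capped at one page per hardware call */
            n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(blksize - 1);
            for (i = blksize; i < n; i += blksize) {
                    /* block i = copy of block i-1, counter incremented */
                    memcpy(buf + i, buf + i - blksize, blksize);
                    crypto_inc(buf + i, blksize);
            }
            return n;    /* bytes' worth of counter blocks now ready */
    }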
diff --git a/queue-3.10/irqchip-armada-370-xp-fix-ipi-race-condition.patch b/queue-3.10/irqchip-armada-370-xp-fix-ipi-race-condition.patch
new file mode 100644
index 0000000..f11d4df
--- /dev/null
@@ -0,0 +1,53 @@
+From a6f089e95b1e08cdea9633d50ad20aa5d44ba64d Mon Sep 17 00:00:00 2001
+From: Lior Amsalem <alior@marvell.com>
+Date: Mon, 25 Nov 2013 17:26:44 +0100
+Subject: irqchip: armada-370-xp: fix IPI race condition
+
+From: Lior Amsalem <alior@marvell.com>
+
+commit a6f089e95b1e08cdea9633d50ad20aa5d44ba64d upstream.
+
+In the Armada 370/XP driver, when we receive an IRQ 0, we read the
+list of doorbells that caused the interrupt from register
+ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS. This gives the list of IPIs that
+were generated. However, instead of acknowledging only the IPIs that
+were generated, we acknowledge *all* the IPIs, by writing
+~IPI_DOORBELL_MASK in the ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS register.
+
+This creates a race condition: if a new IPI that isn't part of the
+ones read into the temporary "ipimask" variable is fired before we
+acknowledge all IPIs, then we will simply lose it. This is causing
+scheduling hangs on SMP intensive workloads.
+
+It is important to mention that this ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS
+register has the following behavior: "A CPU write of 0 clears the bits
+in this field. A CPU write of 1 has no effect". This is what allows us
+to simply write ~ipimask to acknowledge the handled IPIs.
+
+Notice that the same problem is present in the MSI implementation, but
+it will be fixed as a separate patch, so that this IPI fix can be
+pushed to older stable versions as appropriate (all the way to 3.8),
+while the MSI code only appeared in 3.13.
+
+Signed-off-by: Lior Amsalem <alior@marvell.com>
+Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Fixes: 344e873e5657e8dc0 'arm: mvebu: Add IPI support via doorbells'
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jason Cooper <jason@lakedaemon.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/irqchip/irq-armada-370-xp.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/irqchip/irq-armada-370-xp.c
++++ b/drivers/irqchip/irq-armada-370-xp.c
+@@ -229,7 +229,7 @@ armada_370_xp_handle_irq(struct pt_regs
+                                               ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+                               & IPI_DOORBELL_MASK;
+-                      writel(~IPI_DOORBELL_MASK, per_cpu_int_base +
++                      writel(~ipimask, per_cpu_int_base +
+                               ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+                       /* Handle all pending doorbells */
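
The race in miniature (register names as in the driver). The cause register is write-zero-to-clear: writing 0 to a bit clears it and writing 1 leaves it untouched, so the acknowledge must clear exactly the bits that were read, never the whole mask.

    /* read the set of IPIs that have fired so far */
    ipimask = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
                    & IPI_DOORBELL_MASK;

    /* before: writel(~IPI_DOORBELL_MASK, ...) cleared every IPI bit, */
    /* including doorbells that arrived after the read just above     */
    writel(~ipimask, per_cpu_int_base +
           ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);   /* ack only what we saw */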
diff --git a/queue-3.10/series b/queue-3.10/series
index 28db5b4d597e6e6fcea9e0fdd3390277533b4f74..88bc3b844bc4dfcb6ba15f99c9182043e5b3e5e9 100644
@@ -1 +1,11 @@
 selinux-fix-kernel-bug-on-empty-security-contexts.patch
+btrfs-disable-snapshot-aware-defrag-for-now.patch
+crypto-s390-fix-concurrency-issue-in-aes-ctr-mode.patch
+crypto-s390-fix-des-and-des3_ede-cbc-concurrency-issue.patch
+crypto-s390-fix-des-and-des3_ede-ctr-concurrency-issue.patch
+irqchip-armada-370-xp-fix-ipi-race-condition.patch
+arm64-vdso-update-wtm-fields-for-clock_monotonic_coarse.patch
+arm64-vdso-prevent-ld-from-aligning-pt_load-segments-to-64k.patch
+arm64-invalidate-the-tlb-when-replacing-pmd-entries-during-boot.patch
+arm64-vdso-fix-coarse-clock-handling.patch
+arm64-add-dsb-after-icache-flush-in-__flush_icache_all.patch