git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
2.6.34 patches
authorGreg Kroah-Hartman <gregkh@suse.de>
Thu, 24 Jun 2010 22:40:52 +0000 (15:40 -0700)
committerGreg Kroah-Hartman <gregkh@suse.de>
Thu, 24 Jun 2010 22:40:52 +0000 (15:40 -0700)
queue-2.6.34/aio-fix-the-compat-vectored-operations.patch [new file with mode: 0644]
queue-2.6.34/arm-6139-1-armv7-use-the-inner-shareable-i-cache-on-mp.patch [new file with mode: 0644]
queue-2.6.34/arm-6144-1-tcm-memory-bug-freeing-bug.patch [new file with mode: 0644]
queue-2.6.34/arm-6146-1-sa1111-prevent-deadlock-in-resume-path.patch [new file with mode: 0644]
queue-2.6.34/arm-6164-1-add-kto-and-kfrom-to-input-operands-list.patch [new file with mode: 0644]
queue-2.6.34/arm-6166-1-proper-prefetch-abort-handling-on-pre-armv6.patch [new file with mode: 0644]
queue-2.6.34/arm-vfp-fix-vfp_put_double-for-d16-d31.patch [new file with mode: 0644]
queue-2.6.34/idr-fix-backtrack-logic-in-idr_remove_all.patch [new file with mode: 0644]
queue-2.6.34/mlx4_core-fix-possible-chunk-sg-list-overflow-in-mlx4_alloc_icm.patch [new file with mode: 0644]
queue-2.6.34/series

diff --git a/queue-2.6.34/aio-fix-the-compat-vectored-operations.patch b/queue-2.6.34/aio-fix-the-compat-vectored-operations.patch
new file mode 100644 (file)
index 0000000..d93f5bd
--- /dev/null
@@ -0,0 +1,209 @@
+From 9d85cba718efeef9ca00ce3f7f34f5880737aa9b Mon Sep 17 00:00:00 2001
+From: Jeff Moyer <jmoyer@redhat.com>
+Date: Wed, 26 May 2010 14:44:26 -0700
+Subject: aio: fix the compat vectored operations
+
+From: Jeff Moyer <jmoyer@redhat.com>
+
+commit 9d85cba718efeef9ca00ce3f7f34f5880737aa9b upstream.
+
+The aio compat code was not converting the struct iovecs from 32bit to
+64bit pointers, causing either EINVAL to be returned from io_getevents, or
+EFAULT as the result of the I/O.  This patch passes a compat flag to
+io_submit to signal that pointer conversion is necessary for a given iocb
+array.
+
+A variant of this was tested by Michael Tokarev.  I have also updated the
+libaio test harness to exercise this code path with good success.
+Further, I grabbed a copy of ltp and ran the
+testcases/kernel/syscall/readv and writev tests there (compiled with -m32
+on my 64bit system).  All seems happy, but extra eyes on this would be
+welcome.
+
+[akpm@linux-foundation.org: coding-style fixes]
+[akpm@linux-foundation.org: fix CONFIG_COMPAT=n build]
+Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
+Reported-by: Michael Tokarev <mjt@tls.msk.ru>
+Cc: Zach Brown <zach.brown@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/aio.c            |   65 ++++++++++++++++++++++++++++++++--------------------
+ fs/compat.c         |    2 -
+ include/linux/aio.h |    5 ++++
+ 3 files changed, 47 insertions(+), 25 deletions(-)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -36,6 +36,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/mempool.h>
+ #include <linux/hash.h>
++#include <linux/compat.h>
+ #include <asm/kmap_types.h>
+ #include <asm/uaccess.h>
+@@ -1384,13 +1385,22 @@ static ssize_t aio_fsync(struct kiocb *i
+       return ret;
+ }
+-static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
++static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
+ {
+       ssize_t ret;
+-      ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
+-                                  kiocb->ki_nbytes, 1,
+-                                  &kiocb->ki_inline_vec, &kiocb->ki_iovec);
++#ifdef CONFIG_COMPAT
++      if (compat)
++              ret = compat_rw_copy_check_uvector(type,
++                              (struct compat_iovec __user *)kiocb->ki_buf,
++                              kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
++                              &kiocb->ki_iovec);
++      else
++#endif
++              ret = rw_copy_check_uvector(type,
++                              (struct iovec __user *)kiocb->ki_buf,
++                              kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
++                              &kiocb->ki_iovec);
+       if (ret < 0)
+               goto out;
+@@ -1420,7 +1430,7 @@ static ssize_t aio_setup_single_vector(s
+  *    Performs the initial checks and aio retry method
+  *    setup for the kiocb at the time of io submission.
+  */
+-static ssize_t aio_setup_iocb(struct kiocb *kiocb)
++static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
+ {
+       struct file *file = kiocb->ki_filp;
+       ssize_t ret = 0;
+@@ -1469,7 +1479,7 @@ static ssize_t aio_setup_iocb(struct kio
+               ret = security_file_permission(file, MAY_READ);
+               if (unlikely(ret))
+                       break;
+-              ret = aio_setup_vectored_rw(READ, kiocb);
++              ret = aio_setup_vectored_rw(READ, kiocb, compat);
+               if (ret)
+                       break;
+               ret = -EINVAL;
+@@ -1483,7 +1493,7 @@ static ssize_t aio_setup_iocb(struct kio
+               ret = security_file_permission(file, MAY_WRITE);
+               if (unlikely(ret))
+                       break;
+-              ret = aio_setup_vectored_rw(WRITE, kiocb);
++              ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
+               if (ret)
+                       break;
+               ret = -EINVAL;
+@@ -1548,7 +1558,8 @@ static void aio_batch_free(struct hlist_
+ }
+ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+-                       struct iocb *iocb, struct hlist_head *batch_hash)
++                       struct iocb *iocb, struct hlist_head *batch_hash,
++                       bool compat)
+ {
+       struct kiocb *req;
+       struct file *file;
+@@ -1609,7 +1620,7 @@ static int io_submit_one(struct kioctx *
+       req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
+       req->ki_opcode = iocb->aio_lio_opcode;
+-      ret = aio_setup_iocb(req);
++      ret = aio_setup_iocb(req, compat);
+       if (ret)
+               goto out_put_req;
+@@ -1637,20 +1648,8 @@ out_put_req:
+       return ret;
+ }
+-/* sys_io_submit:
+- *    Queue the nr iocbs pointed to by iocbpp for processing.  Returns
+- *    the number of iocbs queued.  May return -EINVAL if the aio_context
+- *    specified by ctx_id is invalid, if nr is < 0, if the iocb at
+- *    *iocbpp[0] is not properly initialized, if the operation specified
+- *    is invalid for the file descriptor in the iocb.  May fail with
+- *    -EFAULT if any of the data structures point to invalid data.  May
+- *    fail with -EBADF if the file descriptor specified in the first
+- *    iocb is invalid.  May fail with -EAGAIN if insufficient resources
+- *    are available to queue any iocbs.  Will return 0 if nr is 0.  Will
+- *    fail with -ENOSYS if not implemented.
+- */
+-SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
+-              struct iocb __user * __user *, iocbpp)
++long do_io_submit(aio_context_t ctx_id, long nr,
++                struct iocb __user *__user *iocbpp, bool compat)
+ {
+       struct kioctx *ctx;
+       long ret = 0;
+@@ -1687,7 +1686,7 @@ SYSCALL_DEFINE3(io_submit, aio_context_t
+                       break;
+               }
+-              ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash);
++              ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
+               if (ret)
+                       break;
+       }
+@@ -1697,6 +1696,24 @@ SYSCALL_DEFINE3(io_submit, aio_context_t
+       return i ? i : ret;
+ }
++/* sys_io_submit:
++ *    Queue the nr iocbs pointed to by iocbpp for processing.  Returns
++ *    the number of iocbs queued.  May return -EINVAL if the aio_context
++ *    specified by ctx_id is invalid, if nr is < 0, if the iocb at
++ *    *iocbpp[0] is not properly initialized, if the operation specified
++ *    is invalid for the file descriptor in the iocb.  May fail with
++ *    -EFAULT if any of the data structures point to invalid data.  May
++ *    fail with -EBADF if the file descriptor specified in the first
++ *    iocb is invalid.  May fail with -EAGAIN if insufficient resources
++ *    are available to queue any iocbs.  Will return 0 if nr is 0.  Will
++ *    fail with -ENOSYS if not implemented.
++ */
++SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
++              struct iocb __user * __user *, iocbpp)
++{
++      return do_io_submit(ctx_id, nr, iocbpp, 0);
++}
++
+ /* lookup_kiocb
+  *    Finds a given iocb for cancellation.
+  */
+--- a/fs/compat.c
++++ b/fs/compat.c
+@@ -600,7 +600,7 @@ compat_sys_io_submit(aio_context_t ctx_i
+       iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64));
+       ret = copy_iocb(nr, iocb, iocb64);
+       if (!ret)
+-              ret = sys_io_submit(ctx_id, nr, iocb64);
++              ret = do_io_submit(ctx_id, nr, iocb64, 1);
+       return ret;
+ }
+--- a/include/linux/aio.h
++++ b/include/linux/aio.h
+@@ -212,6 +212,8 @@ extern void kick_iocb(struct kiocb *iocb
+ extern int aio_complete(struct kiocb *iocb, long res, long res2);
+ struct mm_struct;
+ extern void exit_aio(struct mm_struct *mm);
++extern long do_io_submit(aio_context_t ctx_id, long nr,
++                       struct iocb __user *__user *iocbpp, bool compat);
+ #else
+ static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
+ static inline int aio_put_req(struct kiocb *iocb) { return 0; }
+@@ -219,6 +221,9 @@ static inline void kick_iocb(struct kioc
+ static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; }
+ struct mm_struct;
+ static inline void exit_aio(struct mm_struct *mm) { }
++static inline long do_io_submit(aio_context_t ctx_id, long nr,
++                              struct iocb __user * __user *iocbpp,
++                              bool compat) { return 0; }
+ #endif /* CONFIG_AIO */
+ static inline struct kiocb *list_kiocb(struct list_head *h)
diff --git a/queue-2.6.34/arm-6139-1-armv7-use-the-inner-shareable-i-cache-on-mp.patch b/queue-2.6.34/arm-6139-1-armv7-use-the-inner-shareable-i-cache-on-mp.patch
new file mode 100644 (file)
index 0000000..12875d5
--- /dev/null
@@ -0,0 +1,33 @@
+From a901ff715d53c109821cbbd9d7ea1f2a311646a9 Mon Sep 17 00:00:00 2001
+From: Santosh Shilimkar <santosh.shilimkar@ti.com>
+Date: Thu, 20 May 2010 08:33:38 +0100
+Subject: ARM: 6139/1: ARMv7: Use the Inner Shareable I-cache on MP
+
+From: Santosh Shilimkar <santosh.shilimkar@ti.com>
+
+commit a901ff715d53c109821cbbd9d7ea1f2a311646a9 upstream.
+
+This patch fixes the flush_cache_all for ARMv7 SMP. It was
+missing from commit b8349b569aae661dea9d59d7d2ee587ccea3336c
+
+Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
+index 06a90dc..37c8157 100644
+--- a/arch/arm/mm/cache-v7.S
++++ b/arch/arm/mm/cache-v7.S
+@@ -91,7 +91,11 @@ ENTRY(v7_flush_kern_cache_all)
+  THUMB(       stmfd   sp!, {r4-r7, r9-r11, lr}        )
+       bl      v7_flush_dcache_all
+       mov     r0, #0
++#ifdef CONFIG_SMP
++      mcr     p15, 0, r0, c7, c1, 0           @ invalidate I-cache inner shareable
++#else
+       mcr     p15, 0, r0, c7, c5, 0           @ I+BTB cache invalidate
++#endif
+  ARM( ldmfd   sp!, {r4-r5, r7, r9-r11, lr}    )
+  THUMB(       ldmfd   sp!, {r4-r7, r9-r11, lr}        )
+       mov     pc, lr
diff --git a/queue-2.6.34/arm-6144-1-tcm-memory-bug-freeing-bug.patch b/queue-2.6.34/arm-6144-1-tcm-memory-bug-freeing-bug.patch
new file mode 100644 (file)
index 0000000..db391b0
--- /dev/null
@@ -0,0 +1,39 @@
+From ea208f646c8fb91c39c852e952fc911e1ad045ab Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@stericsson.com>
+Date: Wed, 26 May 2010 07:37:57 +0100
+Subject: ARM: 6144/1: TCM memory bug freeing bug
+
+From: Linus Walleij <linus.walleij@stericsson.com>
+
+commit ea208f646c8fb91c39c852e952fc911e1ad045ab upstream.
+
+This fixes a bug in mm/init.c when freeing the TCM compile memory;
+this was being referred to as a char *, which is incorrect: this
+will dereference the pointer and feed in the value at the location
+instead of the address to it. Change it to a plain char and use
+&(char) to reference it.
+
+Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/mm/init.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -712,10 +712,10 @@ void __init mem_init(void)
+ void free_initmem(void)
+ {
+ #ifdef CONFIG_HAVE_TCM
+-      extern char *__tcm_start, *__tcm_end;
++      extern char __tcm_start, __tcm_end;
+-      totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)),
+-                                  __phys_to_pfn(__pa(__tcm_end)),
++      totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
++                                  __phys_to_pfn(__pa(&__tcm_end)),
+                                   "TCM link");
+ #endif
diff --git a/queue-2.6.34/arm-6146-1-sa1111-prevent-deadlock-in-resume-path.patch b/queue-2.6.34/arm-6146-1-sa1111-prevent-deadlock-in-resume-path.patch
new file mode 100644 (file)
index 0000000..92ef9a8
--- /dev/null
@@ -0,0 +1,55 @@
+From 3defb2476166445982a90c12d33f8947e75476c4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Va=C5=A1ut?= <marek.vasut@gmail.com>
+Date: Wed, 26 May 2010 23:53:09 +0100
+Subject: ARM: 6146/1: sa1111: Prevent deadlock in resume path
+
+From: =?UTF-8?q?Marek=20Va=C5=A1ut?= <marek.vasut@gmail.com>
+
+commit 3defb2476166445982a90c12d33f8947e75476c4 upstream.
+
+This patch reorganises the sa1111_resume() function in a manner the spinlock
+happens after calling the sa1111_wake(). This fixes two bugs:
+
+1) This function called sa1111_wake() which tried to claim the same spinlock
+   the sa1111_resume() already claimed. This would result in certain deadlock.
+
+   Original idea for this part: Russell King <rmk+kernel@arm.linux.org.uk>
+
+2) The function didn't unlock the spinlock in case the chip didn't report
+   correct ID.
+
+   Original idea for this part: Julia Lawall <julia@diku.dk>
+
+Signed-off-by: Marek Vasut <marek.vasut@gmail.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/common/sa1111.c |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/common/sa1111.c
++++ b/arch/arm/common/sa1111.c
+@@ -951,8 +951,6 @@ static int sa1111_resume(struct platform
+       if (!save)
+               return 0;
+-      spin_lock_irqsave(&sachip->lock, flags);
+-
+       /*
+        * Ensure that the SA1111 is still here.
+        * FIXME: shouldn't do this here.
+@@ -969,6 +967,13 @@ static int sa1111_resume(struct platform
+        * First of all, wake up the chip.
+        */
+       sa1111_wake(sachip);
++
++      /*
++       * Only lock for write ops. Also, sa1111_wake must be called with
++       * released spinlock!
++       */
++      spin_lock_irqsave(&sachip->lock, flags);
++
+       sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0);
+       sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1);
diff --git a/queue-2.6.34/arm-6164-1-add-kto-and-kfrom-to-input-operands-list.patch b/queue-2.6.34/arm-6164-1-add-kto-and-kfrom-to-input-operands-list.patch
new file mode 100644 (file)
index 0000000..c375cf8
--- /dev/null
@@ -0,0 +1,111 @@
+From 9a40ac86152c9cffd3dca482a15ddf9a8c5716b3 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Fri, 4 Jun 2010 04:05:15 +0100
+Subject: ARM: 6164/1: Add kto and kfrom to input operands list.
+
+From: Khem Raj <raj.khem@gmail.com>
+
+commit 9a40ac86152c9cffd3dca482a15ddf9a8c5716b3 upstream.
+
+When a function's incoming parameters are not in the input operands
+list, gcc 4.5 does not load the parameters into registers before calling this
+function but the inline assembly assumes valid addresses inside this
+function. This breaks the code because r0 and r1 are invalid when
+execution enters v4wb_copy_user_page ()
+
+Also the constant needs to be used as third input operand so account
+for that as well.
+
+Tested on qemu arm.
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/mm/copypage-feroceon.c |    4 ++--
+ arch/arm/mm/copypage-v4wb.c     |    4 ++--
+ arch/arm/mm/copypage-v4wt.c     |    4 ++--
+ arch/arm/mm/copypage-xsc3.c     |    4 ++--
+ 4 files changed, 8 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/mm/copypage-feroceon.c
++++ b/arch/arm/mm/copypage-feroceon.c
+@@ -18,7 +18,7 @@ feroceon_copy_user_page(void *kto, const
+ {
+       asm("\
+       stmfd   sp!, {r4-r9, lr}                \n\
+-      mov     ip, %0                          \n\
++      mov     ip, %2                          \n\
+ 1:    mov     lr, r1                          \n\
+       ldmia   r1!, {r2 - r9}                  \n\
+       pld     [lr, #32]                       \n\
+@@ -64,7 +64,7 @@ feroceon_copy_user_page(void *kto, const
+       mcr     p15, 0, ip, c7, c10, 4          @ drain WB\n\
+       ldmfd   sp!, {r4-r9, pc}"
+       :
+-      : "I" (PAGE_SIZE));
++      : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE));
+ }
+ void feroceon_copy_user_highpage(struct page *to, struct page *from,
+--- a/arch/arm/mm/copypage-v4wb.c
++++ b/arch/arm/mm/copypage-v4wb.c
+@@ -27,7 +27,7 @@ v4wb_copy_user_page(void *kto, const voi
+ {
+       asm("\
+       stmfd   sp!, {r4, lr}                   @ 2\n\
+-      mov     r2, %0                          @ 1\n\
++      mov     r2, %2                          @ 1\n\
+       ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
+ 1:    mcr     p15, 0, r0, c7, c6, 1           @ 1   invalidate D line\n\
+       stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
+@@ -44,7 +44,7 @@ v4wb_copy_user_page(void *kto, const voi
+       mcr     p15, 0, r1, c7, c10, 4          @ 1   drain WB\n\
+       ldmfd    sp!, {r4, pc}                  @ 3"
+       :
+-      : "I" (PAGE_SIZE / 64));
++      : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
+ }
+ void v4wb_copy_user_highpage(struct page *to, struct page *from,
+--- a/arch/arm/mm/copypage-v4wt.c
++++ b/arch/arm/mm/copypage-v4wt.c
+@@ -25,7 +25,7 @@ v4wt_copy_user_page(void *kto, const voi
+ {
+       asm("\
+       stmfd   sp!, {r4, lr}                   @ 2\n\
+-      mov     r2, %0                          @ 1\n\
++      mov     r2, %2                          @ 1\n\
+       ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
+ 1:    stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
+       ldmia   r1!, {r3, r4, ip, lr}           @ 4+1\n\
+@@ -40,7 +40,7 @@ v4wt_copy_user_page(void *kto, const voi
+       mcr     p15, 0, r2, c7, c7, 0           @ flush ID cache\n\
+       ldmfd   sp!, {r4, pc}                   @ 3"
+       :
+-      : "I" (PAGE_SIZE / 64));
++      : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
+ }
+ void v4wt_copy_user_highpage(struct page *to, struct page *from,
+--- a/arch/arm/mm/copypage-xsc3.c
++++ b/arch/arm/mm/copypage-xsc3.c
+@@ -34,7 +34,7 @@ xsc3_mc_copy_user_page(void *kto, const
+ {
+       asm("\
+       stmfd   sp!, {r4, r5, lr}               \n\
+-      mov     lr, %0                          \n\
++      mov     lr, %2                          \n\
+                                               \n\
+       pld     [r1, #0]                        \n\
+       pld     [r1, #32]                       \n\
+@@ -67,7 +67,7 @@ xsc3_mc_copy_user_page(void *kto, const
+                                               \n\
+       ldmfd   sp!, {r4, r5, pc}"
+       :
+-      : "I" (PAGE_SIZE / 64 - 1));
++      : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
+ }
+ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
diff --git a/queue-2.6.34/arm-6166-1-proper-prefetch-abort-handling-on-pre-armv6.patch b/queue-2.6.34/arm-6166-1-proper-prefetch-abort-handling-on-pre-armv6.patch
new file mode 100644 (file)
index 0000000..9ff911b
--- /dev/null
@@ -0,0 +1,35 @@
+From 5e27fb78df95e027723af2c90ecc9b4527ae59e9 Mon Sep 17 00:00:00 2001
+From: Anfei <anfei.zhou@gmail.com>
+Date: Tue, 8 Jun 2010 15:16:49 +0100
+Subject: ARM: 6166/1: Proper prefetch abort handling on pre-ARMv6
+
+From: Anfei <anfei.zhou@gmail.com>
+
+commit 5e27fb78df95e027723af2c90ecc9b4527ae59e9 upstream.
+
+Instruction faults on pre-ARMv6 CPUs are interpreted as
+a 'translation fault', but do_translation_fault doesn't
+handle well if user mode trying to run instruction above
+TASK_SIZE, and result in the infinite retry of that
+instruction.
+
+Signed-off-by: Anfei Zhou <anfei.zhou@gmail.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/mm/fault.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -393,6 +393,9 @@ do_translation_fault(unsigned long addr,
+       if (addr < TASK_SIZE)
+               return do_page_fault(addr, fsr, regs);
++      if (user_mode(regs))
++              goto bad_area;
++
+       index = pgd_index(addr);
+       /*
diff --git a/queue-2.6.34/arm-vfp-fix-vfp_put_double-for-d16-d31.patch b/queue-2.6.34/arm-vfp-fix-vfp_put_double-for-d16-d31.patch
new file mode 100644 (file)
index 0000000..879c6bb
--- /dev/null
@@ -0,0 +1,30 @@
+From 138de1c44a8e0606501cd8593407e9248e84f1b7 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Thu, 27 May 2010 08:23:29 +0100
+Subject: ARM: VFP: Fix vfp_put_double() for d16-d31
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit 138de1c44a8e0606501cd8593407e9248e84f1b7 upstream.
+
+vfp_put_double() takes the double value in r0,r1 not r1,r2.
+
+Reported-by: Tarun Kanti DebBarma <tarun.kanti@ti.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/vfp/vfphw.S |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/vfp/vfphw.S
++++ b/arch/arm/vfp/vfphw.S
+@@ -277,7 +277,7 @@ ENTRY(vfp_put_double)
+ #ifdef CONFIG_VFPv3
+       @ d16 - d31 registers
+       .irp    dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+-1:    mcrr    p11, 3, r1, r2, c\dr    @ fmdrr r1, r2, d\dr
++1:    mcrr    p11, 3, r0, r1, c\dr    @ fmdrr r0, r1, d\dr
+       mov     pc, lr
+       .org    1b + 8
+       .endr
diff --git a/queue-2.6.34/idr-fix-backtrack-logic-in-idr_remove_all.patch b/queue-2.6.34/idr-fix-backtrack-logic-in-idr_remove_all.patch
new file mode 100644 (file)
index 0000000..336e051
--- /dev/null
@@ -0,0 +1,87 @@
+From 2dcb22b346be7b7b7e630a8970d69cf3f1111ec1 Mon Sep 17 00:00:00 2001
+From: Imre Deak <imre.deak@nokia.com>
+Date: Wed, 26 May 2010 14:43:38 -0700
+Subject: idr: fix backtrack logic in idr_remove_all
+
+From: Imre Deak <imre.deak@nokia.com>
+
+commit 2dcb22b346be7b7b7e630a8970d69cf3f1111ec1 upstream.
+
+Currently idr_remove_all will fail with a use after free error if
+idr::layers is bigger than 2, which on 32 bit systems corresponds to items
+more than 1024.  This is due to stepping back too many levels during
+backtracking.  For simplicity let's assume that IDR_BITS=1 -> we have 2
+nodes at each level below the root node and each leaf node stores two IDs.
+ (In reality for 32 bit systems IDR_BITS=5, with 32 nodes at each sub-root
+level and 32 IDs in each leaf node).  The sequence of freeing the nodes at
+the moment is as follows:
+
+layer
+1 ->                       a(7)
+2 ->            b(3)                  c(5)
+3 ->        d(1)   e(2)           f(4)    g(6)
+
+Until step 4 things go fine, but then node c is freed, whereas node g
+should be freed first.  Since node c contains the pointer to node g we'll
+have a use after free error at step 6.
+
+How many levels we step back after visiting the leaf nodes is currently
+determined by the msb of the id we are currently visiting:
+
+Step
+1.          node d with IDs 0,1 is freed, current ID is advanced to 2.
+            msb of the current ID bit 1. This means we need to step back
+            1 level to node b and take the next sibling, node e.
+2-3.        node e with IDs 2,3 is freed, current ID is 4, msb is bit 2.
+            This means we need to step back 2 levels to node a, freeing
+            node b on the way.
+4-5.        node f with IDs 4,5 is freed, current ID is 6, msb is still
+            bit 2. This means we again need to step back 2 levels to node
+            a and free c on the way.
+6.          We should visit node g, but its pointer is not available as
+            node c was freed.
+
+The fix changes how we determine the number of levels to step back.
+Instead of deducting this merely from the msb of the current ID, we should
+really check if advancing the ID causes an overflow to a bit position
+corresponding to a given layer.  In the above example overflow from bit 0
+to bit 1 should mean stepping back 1 level.  Overflow from bit 1 to bit 2
+should mean stepping back 2 levels and so on.
+
+The fix was tested with IDs up to 1 << 20, which corresponds to 4 layers
+on 32 bit systems.
+
+Signed-off-by: Imre Deak <imre.deak@nokia.com>
+Reviewed-by: Tejun Heo <tj@kernel.org>
+Cc: Eric Paris <eparis@redhat.com>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ lib/idr.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -445,6 +445,7 @@ EXPORT_SYMBOL(idr_remove);
+ void idr_remove_all(struct idr *idp)
+ {
+       int n, id, max;
++      int bt_mask;
+       struct idr_layer *p;
+       struct idr_layer *pa[MAX_LEVEL];
+       struct idr_layer **paa = &pa[0];
+@@ -462,8 +463,10 @@ void idr_remove_all(struct idr *idp)
+                       p = p->ary[(id >> n) & IDR_MASK];
+               }
++              bt_mask = id;
+               id += 1 << n;
+-              while (n < fls(id)) {
++              /* Get the highest bit that the above add changed from 0->1. */
++              while (n < fls(id ^ bt_mask)) {
+                       if (p)
+                               free_layer(p);
+                       n += IDR_BITS;
diff --git a/queue-2.6.34/mlx4_core-fix-possible-chunk-sg-list-overflow-in-mlx4_alloc_icm.patch b/queue-2.6.34/mlx4_core-fix-possible-chunk-sg-list-overflow-in-mlx4_alloc_icm.patch
new file mode 100644 (file)
index 0000000..10039f4
--- /dev/null
@@ -0,0 +1,39 @@
+From c0dc72bad9cf21071f5e4005de46f7c8b67a138a Mon Sep 17 00:00:00 2001
+From: Sebastien Dugue <sebastien.dugue@bull.net>
+Date: Thu, 20 May 2010 15:58:22 -0700
+Subject: mlx4_core: Fix possible chunk sg list overflow in mlx4_alloc_icm()
+
+From: Sebastien Dugue <sebastien.dugue@bull.net>
+
+commit c0dc72bad9cf21071f5e4005de46f7c8b67a138a upstream.
+
+If the number of sg entries in the ICM chunk reaches MLX4_ICM_CHUNK_LEN,
+we must set chunk to NULL even for coherent mappings so that the next
+time through the loop will allocate another chunk.  Otherwise we'll
+overflow the sg list the next time through the loop.  This will lead to
+memory corruption if this case is hit.
+
+mthca does not have this bug.
+
+Signed-off-by: Sebastien Dugue <sebastien.dugue@bull.net>
+Signed-off-by: Roland Dreier <rolandd@cisco.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/mlx4/icm.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/mlx4/icm.c
++++ b/drivers/net/mlx4/icm.c
+@@ -175,9 +175,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct m
+                               if (chunk->nsg <= 0)
+                                       goto fail;
++                      }
++                      if (chunk->npages == MLX4_ICM_CHUNK_LEN)
+                               chunk = NULL;
+-                      }
+                       npages -= 1 << cur_order;
+               } else {
index 515d49d127f7766a7631d6ff2aa975efe2216b34..31bbcbcc96475a4390170cbcd5f6f7469fa4766a 100644 (file)
@@ -102,3 +102,12 @@ xtensa-set-arch_kmalloc_minalign.patch
 blackfin-set-arch_kmalloc_minalign.patch
 tmpfs-insert-tmpfs-cache-pages-to-inactive-list-at-first.patch
 md-manage-redundancy-group-in-sysfs-when-changing-level.patch
+mlx4_core-fix-possible-chunk-sg-list-overflow-in-mlx4_alloc_icm.patch
+arm-6139-1-armv7-use-the-inner-shareable-i-cache-on-mp.patch
+arm-6166-1-proper-prefetch-abort-handling-on-pre-armv6.patch
+arm-6164-1-add-kto-and-kfrom-to-input-operands-list.patch
+arm-6146-1-sa1111-prevent-deadlock-in-resume-path.patch
+arm-6144-1-tcm-memory-bug-freeing-bug.patch
+arm-vfp-fix-vfp_put_double-for-d16-d31.patch
+aio-fix-the-compat-vectored-operations.patch
+idr-fix-backtrack-logic-in-idr_remove_all.patch