--- /dev/null
+From ea208f646c8fb91c39c852e952fc911e1ad045ab Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@stericsson.com>
+Date: Wed, 26 May 2010 07:37:57 +0100
+Subject: ARM: 6144/1: TCM memory bug freeing bug
+
+From: Linus Walleij <linus.walleij@stericsson.com>
+
+commit ea208f646c8fb91c39c852e952fc911e1ad045ab upstream.
+
+This fixes a bug in mm/init.c when freeing the TCM compile-time memory:
+the symbols were being referred to as char * pointers, which is
+incorrect. Using them that way reads the value stored at the symbol's
+location and passes that on instead of the address itself. Change them
+to plain chars and take their addresses with &.
+
+Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
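+To see why the declaration matters, here is a minimal userspace sketch
+(illustrative only, not kernel code; the static array stands in for the
+linker-provided TCM section):
+
+  #include <stdio.h>
+  #include <string.h>
+
+  /* Stand-in for a linker-placed section: only its *address* matters,
+   * exactly like __tcm_start and __tcm_end.
+   */
+  static char tcm_section[64];
+
+  int main(void)
+  {
+          /* Correct (what the patch does): take the symbol's address. */
+          printf("section start        = %p\n", (void *)&tcm_section[0]);
+
+          /* Buggy pattern (what the old code did): a char * declaration
+           * loads the bytes stored at the section start and uses them
+           * as the address -- garbage, not the section start.
+           */
+          char *misread;
+          memcpy(&misread, tcm_section, sizeof(misread));
+          printf("value a char * reads = %p\n", (void *)misread);
+          return 0;
+  }
+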
+ arch/arm/mm/init.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -632,10 +632,10 @@ void __init mem_init(void)
+ void free_initmem(void)
+ {
+ #ifdef CONFIG_HAVE_TCM
+- extern char *__tcm_start, *__tcm_end;
++ extern char __tcm_start, __tcm_end;
+
+- totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)),
+- __phys_to_pfn(__pa(__tcm_end)),
++ totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
++ __phys_to_pfn(__pa(&__tcm_end)),
+ "TCM link");
+ #endif
+
--- /dev/null
+From 3defb2476166445982a90c12d33f8947e75476c4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Va=C5=A1ut?= <marek.vasut@gmail.com>
+Date: Wed, 26 May 2010 23:53:09 +0100
+Subject: ARM: 6146/1: sa1111: Prevent deadlock in resume path
+
+From: =?UTF-8?q?Marek=20Va=C5=A1ut?= <marek.vasut@gmail.com>
+
+commit 3defb2476166445982a90c12d33f8947e75476c4 upstream.
+
+This patch reorganises the sa1111_resume() function so that the spinlock
+is only taken after sa1111_wake() has been called. This fixes two bugs:
+
+1) This function called sa1111_wake(), which tried to claim the same
+   spinlock that sa1111_resume() had already claimed. This would result
+   in a certain deadlock.
+
+ Original idea for this part: Russell King <rmk+kernel@arm.linux.org.uk>
+
+2) The function didn't unlock the spinlock if the chip didn't report
+   the correct ID.
+
+ Original idea for this part: Julia Lawall <julia@diku.dk>
+
+Signed-off-by: Marek Vasut <marek.vasut@gmail.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
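+The deadlock in 1) is the classic pattern of taking a non-recursive lock
+and then calling a helper that takes the same lock. A minimal userspace
+analogy (a pthread mutex standing in for the spinlock; names are
+illustrative, compile with -pthread):
+
+  #include <pthread.h>
+  #include <stdio.h>
+
+  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+
+  static void wake_helper(void)            /* analogue of sa1111_wake()   */
+  {
+          pthread_mutex_lock(&lock);       /* takes the lock itself       */
+          /* ... wake up the chip ... */
+          pthread_mutex_unlock(&lock);
+  }
+
+  static void resume_fixed(void)           /* analogue of sa1111_resume() */
+  {
+          wake_helper();                   /* call with the lock released */
+
+          pthread_mutex_lock(&lock);       /* lock only around the writes */
+          /* ... reprogram the interrupt masks ... */
+          pthread_mutex_unlock(&lock);
+  }
+
+  int main(void)
+  {
+          resume_fixed();
+          printf("fixed ordering completes\n");
+          /* Locking *before* wake_helper(), as the old code did, would
+           * request the same non-recursive lock twice on one thread and
+           * hang here -- the spinlock deadlock described above.
+           */
+          return 0;
+  }
+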
+ arch/arm/common/sa1111.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/common/sa1111.c
++++ b/arch/arm/common/sa1111.c
+@@ -887,8 +887,6 @@ static int sa1111_resume(struct platform
+ if (!save)
+ return 0;
+
+- spin_lock_irqsave(&sachip->lock, flags);
+-
+ /*
+ * Ensure that the SA1111 is still here.
+ * FIXME: shouldn't do this here.
+@@ -905,6 +903,13 @@ static int sa1111_resume(struct platform
+ * First of all, wake up the chip.
+ */
+ sa1111_wake(sachip);
++
++ /*
++ * Only lock for write ops. Also, sa1111_wake must be called with
++ * released spinlock!
++ */
++ spin_lock_irqsave(&sachip->lock, flags);
++
+ sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0);
+ sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1);
+
--- /dev/null
+From 9a40ac86152c9cffd3dca482a15ddf9a8c5716b3 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Fri, 4 Jun 2010 04:05:15 +0100
+Subject: ARM: 6164/1: Add kto and kfrom to input operands list.
+
+From: Khem Raj <raj.khem@gmail.com>
+
+commit 9a40ac86152c9cffd3dca482a15ddf9a8c5716b3 upstream.
+
+When a function's incoming parameters are not in the input operands
+list, gcc 4.5 does not load the parameters into registers before
+calling this function, but the inline assembly assumes valid addresses
+inside this function. This breaks the code because r0 and r1 are
+invalid when execution enters v4wb_copy_user_page().
+
+Also, the constant needs to be used as the third input operand, so
+account for that as well.
+
+Tested on qemu arm.
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
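+The rule the patch enforces is that every value the asm body consumes
+must appear in its operand list, otherwise the compiler need not keep it
+in any particular register. A minimal sketch (assumes a 32-bit ARM
+compiler; the function is illustrative, not from the kernel):
+
+  /* Correct: the inputs the asm reads are named operands, so the
+   * compiler guarantees they are live in the registers substituted
+   * for %1 and %2.
+   */
+  static inline int add_demo(int a, int b)
+  {
+          int res;
+
+          asm("add %0, %1, %2"
+              : "=r" (res)                 /* output                  */
+              : "r" (a), "r" (b));         /* inputs the asm uses     */
+          return res;
+  }
+
+  int main(void)
+  {
+          return add_demo(2, 3) == 5 ? 0 : 1;
+  }
+
+The broken pattern was asm("... r0 ... r1 ..." : : "I" (constant)):
+nothing ties r0/r1 to the C parameters, so gcc 4.5 may clobber them or
+never place the parameters there at all -- hence listing kto and kfrom
+as "r" inputs and referring to the constant as operand %2.
+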
+ arch/arm/mm/copypage-feroceon.c | 4 ++--
+ arch/arm/mm/copypage-v4wb.c | 4 ++--
+ arch/arm/mm/copypage-v4wt.c | 4 ++--
+ arch/arm/mm/copypage-xsc3.c | 4 ++--
+ 4 files changed, 8 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/mm/copypage-feroceon.c
++++ b/arch/arm/mm/copypage-feroceon.c
+@@ -18,7 +18,7 @@ feroceon_copy_user_page(void *kto, const
+ {
+ asm("\
+ stmfd sp!, {r4-r9, lr} \n\
+- mov ip, %0 \n\
++ mov ip, %2 \n\
+ 1: mov lr, r1 \n\
+ ldmia r1!, {r2 - r9} \n\
+ pld [lr, #32] \n\
+@@ -64,7 +64,7 @@ feroceon_copy_user_page(void *kto, const
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\
+ ldmfd sp!, {r4-r9, pc}"
+ :
+- : "I" (PAGE_SIZE));
++ : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE));
+ }
+
+ void feroceon_copy_user_highpage(struct page *to, struct page *from,
+--- a/arch/arm/mm/copypage-v4wb.c
++++ b/arch/arm/mm/copypage-v4wb.c
+@@ -27,7 +27,7 @@ v4wb_copy_user_page(void *kto, const voi
+ {
+ asm("\
+ stmfd sp!, {r4, lr} @ 2\n\
+- mov r2, %0 @ 1\n\
++ mov r2, %2 @ 1\n\
+ ldmia r1!, {r3, r4, ip, lr} @ 4\n\
+ 1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
+ stmia r0!, {r3, r4, ip, lr} @ 4\n\
+@@ -44,7 +44,7 @@ v4wb_copy_user_page(void *kto, const voi
+ mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\
+ ldmfd sp!, {r4, pc} @ 3"
+ :
+- : "I" (PAGE_SIZE / 64));
++ : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
+ }
+
+ void v4wb_copy_user_highpage(struct page *to, struct page *from,
+--- a/arch/arm/mm/copypage-v4wt.c
++++ b/arch/arm/mm/copypage-v4wt.c
+@@ -25,7 +25,7 @@ v4wt_copy_user_page(void *kto, const voi
+ {
+ asm("\
+ stmfd sp!, {r4, lr} @ 2\n\
+- mov r2, %0 @ 1\n\
++ mov r2, %2 @ 1\n\
+ ldmia r1!, {r3, r4, ip, lr} @ 4\n\
+ 1: stmia r0!, {r3, r4, ip, lr} @ 4\n\
+ ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\
+@@ -40,7 +40,7 @@ v4wt_copy_user_page(void *kto, const voi
+ mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\
+ ldmfd sp!, {r4, pc} @ 3"
+ :
+- : "I" (PAGE_SIZE / 64));
++ : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
+ }
+
+ void v4wt_copy_user_highpage(struct page *to, struct page *from,
+--- a/arch/arm/mm/copypage-xsc3.c
++++ b/arch/arm/mm/copypage-xsc3.c
+@@ -34,7 +34,7 @@ xsc3_mc_copy_user_page(void *kto, const
+ {
+ asm("\
+ stmfd sp!, {r4, r5, lr} \n\
+- mov lr, %0 \n\
++ mov lr, %2 \n\
+ \n\
+ pld [r1, #0] \n\
+ pld [r1, #32] \n\
+@@ -67,7 +67,7 @@ xsc3_mc_copy_user_page(void *kto, const
+ \n\
+ ldmfd sp!, {r4, r5, pc}"
+ :
+- : "I" (PAGE_SIZE / 64 - 1));
++ : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
+ }
+
+ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
--- /dev/null
+From 5e27fb78df95e027723af2c90ecc9b4527ae59e9 Mon Sep 17 00:00:00 2001
+From: Anfei <anfei.zhou@gmail.com>
+Date: Tue, 8 Jun 2010 15:16:49 +0100
+Subject: ARM: 6166/1: Proper prefetch abort handling on pre-ARMv6
+
+From: Anfei <anfei.zhou@gmail.com>
+
+commit 5e27fb78df95e027723af2c90ecc9b4527ae59e9 upstream.
+
+Instruction faults on pre-ARMv6 CPUs are interpreted as a
+'translation fault', but do_translation_fault() does not handle the
+case where user mode tries to run an instruction above TASK_SIZE,
+which results in the infinite retry of that instruction.
+
+Signed-off-by: Anfei Zhou <anfei.zhou@gmail.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
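+The user-visible effect of the fix, as a userspace sketch (the address
+below is only an assumed example of something above TASK_SIZE): jumping
+to a kernel address from user mode must deliver SIGSEGV rather than
+spinning on the faulting instruction.
+
+  #include <signal.h>
+  #include <unistd.h>
+
+  static void on_segv(int sig)
+  {
+          (void)sig;
+          write(STDOUT_FILENO, "got SIGSEGV as expected\n", 24);
+          _exit(0);
+  }
+
+  int main(void)
+  {
+          /* Assumed example of an address above TASK_SIZE (3G/1G split). */
+          void (*kernel_addr)(void) = (void (*)(void))0xc0000000;
+
+          signal(SIGSEGV, on_segv);
+          kernel_addr();          /* prefetch abort above TASK_SIZE       */
+          return 1;               /* not reached: the fix sends SIGSEGV   */
+  }
+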
+ arch/arm/mm/fault.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -386,6 +386,9 @@ do_translation_fault(unsigned long addr,
+ if (addr < TASK_SIZE)
+ return do_page_fault(addr, fsr, regs);
+
++ if (user_mode(regs))
++ goto bad_area;
++
+ index = pgd_index(addr);
+
+ /*
--- /dev/null
+From 138de1c44a8e0606501cd8593407e9248e84f1b7 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Thu, 27 May 2010 08:23:29 +0100
+Subject: ARM: VFP: Fix vfp_put_double() for d16-d31
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit 138de1c44a8e0606501cd8593407e9248e84f1b7 upstream.
+
+vfp_put_double() takes the double value in r0,r1 not r1,r2.
+
+Reported-by: Tarun Kanti DebBarma <tarun.kanti@ti.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
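+Under the 32-bit ARM AAPCS a 64-bit first argument occupies the register
+pair r0/r1 and the next argument lands in r2, which is why the fmdrr
+source registers must be r0,r1. A small sketch of that convention
+(illustrative only; compile for 32-bit little-endian ARM):
+
+  #include <stdint.h>
+
+  /* With d in r0/r1 (low word in r0) and dn in r2, returning the low
+   * word needs no data movement: it is already in r0.
+   */
+  uint32_t low_word_of_double_arg(uint64_t d, unsigned int dn)
+  {
+          (void)dn;               /* dn arrives in r2             */
+          return (uint32_t)d;     /* low word of d: already in r0 */
+  }
+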
+ arch/arm/vfp/vfphw.S | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/vfp/vfphw.S
++++ b/arch/arm/vfp/vfphw.S
+@@ -277,7 +277,7 @@ ENTRY(vfp_put_double)
+ #ifdef CONFIG_VFPv3
+ @ d16 - d31 registers
+ .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+-1: mcrr p11, 3, r1, r2, c\dr @ fmdrr r1, r2, d\dr
++1: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr
+ mov pc, lr
+ .org 1b + 8
+ .endr
--- /dev/null
+From c0dc72bad9cf21071f5e4005de46f7c8b67a138a Mon Sep 17 00:00:00 2001
+From: Sebastien Dugue <sebastien.dugue@bull.net>
+Date: Thu, 20 May 2010 15:58:22 -0700
+Subject: mlx4_core: Fix possible chunk sg list overflow in mlx4_alloc_icm()
+
+From: Sebastien Dugue <sebastien.dugue@bull.net>
+
+commit c0dc72bad9cf21071f5e4005de46f7c8b67a138a upstream.
+
+If the number of sg entries in the ICM chunk reaches MLX4_ICM_CHUNK_LEN,
+we must set chunk to NULL even for coherent mappings so that the next
+time through the loop will allocate another chunk. Otherwise we'll
+overflow the sg list the next time through the loop. This will lead to
+memory corruption if this case is hit.
+
+mthca does not have this bug.
+
+Signed-off-by: Sebastien Dugue <sebastien.dugue@bull.net>
+Signed-off-by: Roland Dreier <rolandd@cisco.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
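+The pattern being restored, reduced to a userspace sketch (hypothetical
+names; CHUNK_LEN stands in for MLX4_ICM_CHUNK_LEN): once a chunk's entry
+array is full, the chunk pointer has to be dropped so that the next pass
+allocates a fresh chunk instead of writing past the end of the array.
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  #define CHUNK_LEN 4                    /* stand-in for MLX4_ICM_CHUNK_LEN */
+
+  struct chunk {
+          int entries[CHUNK_LEN];
+          int used;
+  };
+
+  int main(void)
+  {
+          struct chunk *chunks[8] = { NULL };
+          struct chunk *chunk = NULL;
+          int nchunks = 0;
+
+          for (int i = 0; i < 10; i++) {
+                  if (!chunk) {
+                          chunk = calloc(1, sizeof(*chunk));
+                          if (!chunk)
+                                  return 1;
+                          chunks[nchunks++] = chunk;  /* real code links it */
+                  }
+
+                  chunk->entries[chunk->used++] = i;  /* fill one slot */
+
+                  /* The fix: once the chunk is full, forget it on every
+                   * path, or the next iteration writes entry CHUNK_LEN
+                   * and corrupts memory.
+                   */
+                  if (chunk->used == CHUNK_LEN)
+                          chunk = NULL;
+          }
+
+          printf("allocated %d chunks for 10 entries\n", nchunks);
+          while (nchunks)
+                  free(chunks[--nchunks]);
+          return 0;
+  }
+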
+ drivers/net/mlx4/icm.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/mlx4/icm.c
++++ b/drivers/net/mlx4/icm.c
+@@ -174,9 +174,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct m
+
+ if (chunk->nsg <= 0)
+ goto fail;
++ }
+
++ if (chunk->npages == MLX4_ICM_CHUNK_LEN)
+ chunk = NULL;
+- }
+
+ npages -= 1 << cur_order;
+ } else {
xtensa-set-arch_kmalloc_minalign.patch
blackfin-set-arch_kmalloc_minalign.patch
tmpfs-insert-tmpfs-cache-pages-to-inactive-list-at-first.patch
+mlx4_core-fix-possible-chunk-sg-list-overflow-in-mlx4_alloc_icm.patch
+arm-6166-1-proper-prefetch-abort-handling-on-pre-armv6.patch
+arm-6164-1-add-kto-and-kfrom-to-input-operands-list.patch
+arm-6146-1-sa1111-prevent-deadlock-in-resume-path.patch
+arm-6144-1-tcm-memory-bug-freeing-bug.patch
+arm-vfp-fix-vfp_put_double-for-d16-d31.patch