3.10-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 6 Jan 2014 22:34:13 +0000 (14:34 -0800)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 6 Jan 2014 22:34:13 +0000 (14:34 -0800)
added patches:
arm64-avoid-cache-flushing-in-flush_dcache_page.patch
arm64-change-kernel-stack-size-to-16k.patch
arm64-check-for-number-of-arguments-in-syscall_get-set_arguments.patch
arm64-do-not-flush-the-d-cache-for-anonymous-pages.patch
arm64-dts-reserve-the-memory-used-for-secondary-cpu-release-address.patch
arm64-fix-possible-invalid-fpsimd-initialization-state.patch
arm64-only-enable-local-interrupts-after-the-cpu-is-marked-online.patch
arm64-remove-unused-cpu_name-ascii-in-arch-arm64-mm-proc.s.patch
arm64-use-normal-noncacheable-memory-for-writecombine.patch
arm64-virt-ensure-visibility-of-__boot_cpu_mode.patch
clocksource-arch_timer-use-virtual-counters.patch

12 files changed:
queue-3.10/arm64-avoid-cache-flushing-in-flush_dcache_page.patch [new file with mode: 0644]
queue-3.10/arm64-change-kernel-stack-size-to-16k.patch [new file with mode: 0644]
queue-3.10/arm64-check-for-number-of-arguments-in-syscall_get-set_arguments.patch [new file with mode: 0644]
queue-3.10/arm64-do-not-flush-the-d-cache-for-anonymous-pages.patch [new file with mode: 0644]
queue-3.10/arm64-dts-reserve-the-memory-used-for-secondary-cpu-release-address.patch [new file with mode: 0644]
queue-3.10/arm64-fix-possible-invalid-fpsimd-initialization-state.patch [new file with mode: 0644]
queue-3.10/arm64-only-enable-local-interrupts-after-the-cpu-is-marked-online.patch [new file with mode: 0644]
queue-3.10/arm64-remove-unused-cpu_name-ascii-in-arch-arm64-mm-proc.s.patch [new file with mode: 0644]
queue-3.10/arm64-use-normal-noncacheable-memory-for-writecombine.patch [new file with mode: 0644]
queue-3.10/arm64-virt-ensure-visibility-of-__boot_cpu_mode.patch [new file with mode: 0644]
queue-3.10/clocksource-arch_timer-use-virtual-counters.patch [new file with mode: 0644]
queue-3.10/series

diff --git a/queue-3.10/arm64-avoid-cache-flushing-in-flush_dcache_page.patch b/queue-3.10/arm64-avoid-cache-flushing-in-flush_dcache_page.patch
new file mode 100644
index 0000000..584e1c7
--- /dev/null
@@ -0,0 +1,60 @@
+From b5b6c9e9149d8a7c3f1d7b9d0c046c6184e1dd17 Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Wed, 1 May 2013 12:23:05 +0100
+Subject: arm64: Avoid cache flushing in flush_dcache_page()
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit b5b6c9e9149d8a7c3f1d7b9d0c046c6184e1dd17 upstream.
+
+The flush_dcache_page() function is called when the kernel modified a
+page cache page. Since the D-cache on AArch64 does not have aliases
+this function can simply mark the page as dirty for later flushing via
+set_pte_at()/__sync_icache_dcache() if the page is executable (to ensure
+the I-D cache coherency).
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Reported-by: Will Deacon <will.deacon@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Cc: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/flush.c |   22 ++++------------------
+ 1 file changed, 4 insertions(+), 18 deletions(-)
+
+--- a/arch/arm64/mm/flush.c
++++ b/arch/arm64/mm/flush.c
+@@ -94,28 +94,14 @@ void __sync_icache_dcache(pte_t pte, uns
+ }
+ /*
+- * Ensure cache coherency between kernel mapping and userspace mapping of this
+- * page.
++ * This function is called when a page has been modified by the kernel. Mark
++ * it as dirty for later flushing when mapped in user space (if executable,
++ * see __sync_icache_dcache).
+  */
+ void flush_dcache_page(struct page *page)
+ {
+-      struct address_space *mapping;
+-
+-      /*
+-       * The zero page is never written to, so never has any dirty cache
+-       * lines, and therefore never needs to be flushed.
+-       */
+-      if (page == ZERO_PAGE(0))
+-              return;
+-
+-      mapping = page_mapping(page);
+-      if (mapping && mapping_mapped(mapping)) {
+-              __flush_dcache_page(page);
+-              __flush_icache_all();
+-              set_bit(PG_dcache_clean, &page->flags);
+-      } else {
++      if (test_bit(PG_dcache_clean, &page->flags))
+               clear_bit(PG_dcache_clean, &page->flags);
+-      }
+ }
+ EXPORT_SYMBOL(flush_dcache_page);
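
The deferred-flush scheme this patch adopts (flush_dcache_page() only clears PG_dcache_clean, and __sync_icache_dcache() flushes lazily when the page is mapped executable) can be illustrated with a small user-space sketch. The names and the atomic flag below are illustrative stand-ins, not kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a struct page with its PG_dcache_clean bit. */
struct fake_page {
	atomic_bool dcache_clean;
};

/* Analogue of the new flush_dcache_page(): the kernel wrote to the page,
 * so only mark it dirty; no cache maintenance happens here. */
static void kernel_wrote_page(struct fake_page *p)
{
	atomic_store(&p->dcache_clean, false);
}

/* Analogue of __sync_icache_dcache(): flush only if the page was dirty,
 * and mark it clean so further mappings of the same page skip the work. */
static void map_page_executable(struct fake_page *p)
{
	if (!atomic_exchange(&p->dcache_clean, true))
		puts("flush D-cache lines + invalidate I-cache for this page");
	else
		puts("page already clean, nothing to flush");
}

int main(void)
{
	struct fake_page page;

	atomic_init(&page.dcache_clean, false);
	map_page_executable(&page);	/* first executable mapping: flushes */
	map_page_executable(&page);	/* second mapping: skipped           */
	kernel_wrote_page(&page);	/* kernel modifies the page again    */
	map_page_executable(&page);	/* next mapping: flushes once more   */
	return 0;
}
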
diff --git a/queue-3.10/arm64-change-kernel-stack-size-to-16k.patch b/queue-3.10/arm64-change-kernel-stack-size-to-16k.patch
new file mode 100644
index 0000000..80f3231
--- /dev/null
@@ -0,0 +1,49 @@
+From 845ad05ec31e0f3872a321e10dbeaf872022632c Mon Sep 17 00:00:00 2001
+From: Feng Kan <fkan@apm.com>
+Date: Tue, 23 Jul 2013 18:52:31 +0100
+Subject: arm64: Change kernel stack size to 16K
+
+From: Feng Kan <fkan@apm.com>
+
+commit 845ad05ec31e0f3872a321e10dbeaf872022632c upstream.
+
+Written by Catalin Marinas, tested by APM on storm platform. This is needed
+because of the failures encountered when running SpecWeb benchmark test.
+
+Signed-off-by: Feng Kan <fkan@apm.com>
+Acked-by: Kumar Sankaran <ksankaran@apm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/thread_info.h |    4 ++--
+ arch/arm64/kernel/entry.S            |    2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/include/asm/thread_info.h
++++ b/arch/arm64/include/asm/thread_info.h
+@@ -24,10 +24,10 @@
+ #include <linux/compiler.h>
+ #ifndef CONFIG_ARM64_64K_PAGES
+-#define THREAD_SIZE_ORDER     1
++#define THREAD_SIZE_ORDER     2
+ #endif
+-#define THREAD_SIZE           8192
++#define THREAD_SIZE           16384
+ #define THREAD_START_SP               (THREAD_SIZE - 16)
+ #ifndef __ASSEMBLY__
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -121,7 +121,7 @@
+       .macro  get_thread_info, rd
+       mov     \rd, sp
+-      and     \rd, \rd, #~((1 << 13) - 1)     // top of 8K stack
++      and     \rd, \rd, #~(THREAD_SIZE - 1)   // top of stack
+       .endm
+ /*
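
A quick user-space sketch of the arithmetic involved (the stack pointer value is made up for illustration): get_thread_info derives the stack base by masking the stack pointer with ~(THREAD_SIZE - 1), so the hard-coded 8K mask in entry.S would point at the middle of the new 16K stack rather than at its base.

#include <stdint.h>
#include <stdio.h>

#define OLD_THREAD_SIZE 8192ULL		/* THREAD_SIZE_ORDER 1: two 4K pages  */
#define NEW_THREAD_SIZE 16384ULL	/* THREAD_SIZE_ORDER 2: four 4K pages */

int main(void)
{
	/* Hypothetical stack pointer in the upper half of a 16K stack. */
	uint64_t sp = 0xffff000012346e10ULL;

	uint64_t base_8k  = sp & ~(OLD_THREAD_SIZE - 1); /* and rd, rd, #~((1 << 13) - 1) */
	uint64_t base_16k = sp & ~(NEW_THREAD_SIZE - 1); /* and rd, rd, #~(THREAD_SIZE - 1) */

	printf("8K mask  -> %#llx\n", (unsigned long long)base_8k);  /* 0xffff000012346000 */
	printf("16K mask -> %#llx\n", (unsigned long long)base_16k); /* 0xffff000012344000 */
	/* Only the 16K mask lands on the base of the stack, where
	 * thread_info lives, hence the matching entry.S change. */
	return 0;
}
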
diff --git a/queue-3.10/arm64-check-for-number-of-arguments-in-syscall_get-set_arguments.patch b/queue-3.10/arm64-check-for-number-of-arguments-in-syscall_get-set_arguments.patch
new file mode 100644
index 0000000..576de82
--- /dev/null
@@ -0,0 +1,53 @@
+From 7b22c03536a539142f931815528d55df455ffe2d Mon Sep 17 00:00:00 2001
+From: AKASHI Takahiro <takahiro.akashi@linaro.org>
+Date: Thu, 3 Oct 2013 06:47:44 +0100
+Subject: arm64: check for number of arguments in syscall_get/set_arguments()
+
+From: AKASHI Takahiro <takahiro.akashi@linaro.org>
+
+commit 7b22c03536a539142f931815528d55df455ffe2d upstream.
+
+In ftrace_syscall_enter(),
+    syscall_get_arguments(..., 0, n, ...)
+        if (i == 0) { <handle orig_x0> ...; n--;}
+        memcpy(..., n * sizeof(args[0]));
+If 'number of arguments(n)' is zero and 'argument index(i)' is also zero in
+syscall_get_arguments(), none of arguments should be copied by memcpy().
+Otherwise 'n--' can be a big positive number and unexpected amount of data
+will be copied. Tracing system calls which take no argument, say sync(void),
+may hit this case and eventually make the system corrupted.
+This patch fixes the issue both in syscall_get_arguments() and
+syscall_set_arguments().
+
+Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/syscall.h |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/arm64/include/asm/syscall.h
++++ b/arch/arm64/include/asm/syscall.h
+@@ -59,6 +59,9 @@ static inline void syscall_get_arguments
+                                        unsigned int i, unsigned int n,
+                                        unsigned long *args)
+ {
++      if (n == 0)
++              return;
++
+       if (i + n > SYSCALL_MAX_ARGS) {
+               unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
+               unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
+@@ -82,6 +85,9 @@ static inline void syscall_set_arguments
+                                        unsigned int i, unsigned int n,
+                                        const unsigned long *args)
+ {
++      if (n == 0)
++              return;
++
+       if (i + n > SYSCALL_MAX_ARGS) {
+               pr_warning("%s called with max args %d, handling only %d\n",
+                          __func__, i + n, SYSCALL_MAX_ARGS);
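
The unsigned wrap-around described above is easy to reproduce in user space; a minimal sketch (the variable names are illustrative, not the kernel's exact code):

#include <stdio.h>

int main(void)
{
	unsigned int i = 0, n = 0;	/* tracing a syscall that takes no arguments */
	unsigned long args[6] = { 0 };

	if (i == 0)
		n--;			/* orig_x0 handling: 0 - 1 wraps to UINT_MAX */

	size_t len = (size_t)n * sizeof(args[0]);
	printf("n = %u, memcpy() length would be %zu bytes\n", n, len);

	/* The fix bails out of syscall_get/set_arguments() before any of
	 * this when n == 0, so nothing is copied for no-argument syscalls. */
	return 0;
}
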
diff --git a/queue-3.10/arm64-do-not-flush-the-d-cache-for-anonymous-pages.patch b/queue-3.10/arm64-do-not-flush-the-d-cache-for-anonymous-pages.patch
new file mode 100644
index 0000000..70c5756
--- /dev/null
@@ -0,0 +1,53 @@
+From 7249b79f6b4cc3c2aa9138dca52e535a4c789107 Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Wed, 1 May 2013 16:34:22 +0100
+Subject: arm64: Do not flush the D-cache for anonymous pages
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit 7249b79f6b4cc3c2aa9138dca52e535a4c789107 upstream.
+
+The D-cache on AArch64 is VIPT non-aliasing, so there is no need to
+flush it for anonymous pages.
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Reported-by: Will Deacon <will.deacon@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Cc: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/flush.c |    8 +++-----
+ arch/arm64/mm/mmu.c   |    1 -
+ 2 files changed, 3 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/mm/flush.c
++++ b/arch/arm64/mm/flush.c
+@@ -77,14 +77,12 @@ void __flush_dcache_page(struct page *pa
+ void __sync_icache_dcache(pte_t pte, unsigned long addr)
+ {
+-      unsigned long pfn;
+-      struct page *page;
++      struct page *page = pte_page(pte);
+-      pfn = pte_pfn(pte);
+-      if (!pfn_valid(pfn))
++      /* no flushing needed for anonymous pages */
++      if (!page_mapping(page))
+               return;
+-      page = pfn_to_page(pfn);
+       if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
+               __flush_dcache_page(page);
+               __flush_icache_all();
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -339,7 +339,6 @@ void __init paging_init(void)
+       bootmem_init();
+       empty_zero_page = virt_to_page(zero_page);
+-      __flush_dcache_page(empty_zero_page);
+       /*
+        * TTBR0 is only used for the identity mapping at this stage. Make it
diff --git a/queue-3.10/arm64-dts-reserve-the-memory-used-for-secondary-cpu-release-address.patch b/queue-3.10/arm64-dts-reserve-the-memory-used-for-secondary-cpu-release-address.patch
new file mode 100644
index 0000000..37a52b8
--- /dev/null
@@ -0,0 +1,33 @@
+From df503ba7f653c590b475ab80bde788edf5af70d5 Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Thu, 14 Nov 2013 15:15:37 +0000
+Subject: arm64: dts: Reserve the memory used for secondary CPU release address
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit df503ba7f653c590b475ab80bde788edf5af70d5 upstream.
+
+With the spin-table SMP booting method, secondary CPUs poll a location
+passed in the DT. The foundation-v8.dts file doesn't have this memory
+reserved and there is a risk of Linux using it before secondary CPUs are
+started.
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/boot/dts/foundation-v8.dts |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm64/boot/dts/foundation-v8.dts
++++ b/arch/arm64/boot/dts/foundation-v8.dts
+@@ -6,6 +6,8 @@
+ /dts-v1/;
++/memreserve/ 0x80000000 0x00010000;
++
+ / {
+       model = "Foundation-v8A";
+       compatible = "arm,foundation-aarch64", "arm,vexpress";
diff --git a/queue-3.10/arm64-fix-possible-invalid-fpsimd-initialization-state.patch b/queue-3.10/arm64-fix-possible-invalid-fpsimd-initialization-state.patch
new file mode 100644
index 0000000..5db4ae9
--- /dev/null
@@ -0,0 +1,38 @@
+From 6db83cea1c975b9a102e17def7d2795814e1ae2b Mon Sep 17 00:00:00 2001
+From: Jiang Liu <jiang.liu@huawei.com>
+Date: Fri, 27 Sep 2013 09:04:41 +0100
+Subject: arm64: fix possible invalid FPSIMD initialization state
+
+From: Jiang Liu <jiang.liu@huawei.com>
+
+commit 6db83cea1c975b9a102e17def7d2795814e1ae2b upstream.
+
+If context switching happens during executing fpsimd_flush_thread(),
+stale value in FPSIMD registers will be saved into current thread's
+fpsimd_state by fpsimd_thread_switch(). That may cause invalid
+initialization state for the new process, so disable preemption
+when executing fpsimd_flush_thread().
+
+Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
+Cc: Jiang Liu <liuj97@gmail.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/fpsimd.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -79,8 +79,10 @@ void fpsimd_thread_switch(struct task_st
+ void fpsimd_flush_thread(void)
+ {
++      preempt_disable();
+       memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+       fpsimd_load_state(&current->thread.fpsimd_state);
++      preempt_enable();
+ }
+ /*
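
The race window can be sketched sequentially in user space (the helpers below stand in for fpsimd_thread_switch() and fpsimd_load_state(); this is an analogy, not kernel code): if a switch-out lands between the memset() and the register load, the stale hardware contents are written back over the freshly cleared state.

#include <stdio.h>
#include <string.h>

struct fpsimd_regs { unsigned long v[4]; };

static struct fpsimd_regs hw = { { 7, 7, 7, 7 } };	/* stale contents left in the "hardware"  */
static struct fpsimd_regs thread_state;			/* current->thread.fpsimd_state analogue  */

static void switch_out_save(void) { thread_state = hw; }	/* fpsimd_thread_switch() saving regs */
static void load_state(void)      { hw = thread_state; }	/* fpsimd_load_state()                */

int main(void)
{
	/* fpsimd_flush_thread() without preempt_disable(): */
	memset(&thread_state, 0, sizeof(thread_state));	/* step 1: clear the thread state      */
	switch_out_save();	/* preemption here: stale regs overwrite the cleared state     */
	load_state();		/* step 2: loads 7s instead of the intended 0s                 */

	printf("v[0] after flush = %lu (expected 0)\n", hw.v[0]);
	/* With preempt_disable()/preempt_enable() around steps 1 and 2, no
	 * switch-out can intervene and the thread starts from zeroed state. */
	return 0;
}
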
diff --git a/queue-3.10/arm64-only-enable-local-interrupts-after-the-cpu-is-marked-online.patch b/queue-3.10/arm64-only-enable-local-interrupts-after-the-cpu-is-marked-online.patch
new file mode 100644
index 0000000..937861e
--- /dev/null
@@ -0,0 +1,53 @@
+From 53ae3acd4390ffeecb3a11dbd5be347b5a3d98f2 Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Fri, 19 Jul 2013 15:08:15 +0100
+Subject: arm64: Only enable local interrupts after the CPU is marked online
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit 53ae3acd4390ffeecb3a11dbd5be347b5a3d98f2 upstream.
+
+There is a slight chance that (timer) interrupts are triggered before a
+secondary CPU has been marked online with implications on softirq thread
+affinity.
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Reported-by: Kirill Tkhai <tkhai@yandex.ru>
+Cc: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/smp.c |   15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -200,13 +200,6 @@ asmlinkage void __cpuinit secondary_star
+       raw_spin_unlock(&boot_lock);
+       /*
+-       * Enable local interrupts.
+-       */
+-      notify_cpu_starting(cpu);
+-      local_irq_enable();
+-      local_fiq_enable();
+-
+-      /*
+        * OK, now it's safe to let the boot CPU continue.  Wait for
+        * the CPU migration code to notice that the CPU is online
+        * before we continue.
+@@ -215,6 +208,14 @@ asmlinkage void __cpuinit secondary_star
+       complete(&cpu_running);
+       /*
++       * Enable GIC and timers.
++       */
++      notify_cpu_starting(cpu);
++
++      local_irq_enable();
++      local_fiq_enable();
++
++      /*
+        * OK, it's off to the idle thread for us
+        */
+       cpu_startup_entry(CPUHP_ONLINE);
diff --git a/queue-3.10/arm64-remove-unused-cpu_name-ascii-in-arch-arm64-mm-proc.s.patch b/queue-3.10/arm64-remove-unused-cpu_name-ascii-in-arch-arm64-mm-proc.s.patch
new file mode 100644
index 0000000..14d3f55
--- /dev/null
@@ -0,0 +1,32 @@
+From f3a1d7d53dccf51959aec16b574617cc6bfeca09 Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Mon, 2 Sep 2013 16:33:54 +0100
+Subject: arm64: Remove unused cpu_name ascii in arch/arm64/mm/proc.S
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit f3a1d7d53dccf51959aec16b574617cc6bfeca09 upstream.
+
+This string has been moved to arch/arm64/kernel/cputable.c.
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/proc.S |    4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -95,10 +95,6 @@ ENTRY(cpu_do_switch_mm)
+       ret
+ ENDPROC(cpu_do_switch_mm)
+-cpu_name:
+-      .ascii  "AArch64 Processor"
+-      .align
+-
+       .section ".text.init", #alloc, #execinstr
+ /*
diff --git a/queue-3.10/arm64-use-normal-noncacheable-memory-for-writecombine.patch b/queue-3.10/arm64-use-normal-noncacheable-memory-for-writecombine.patch
new file mode 100644
index 0000000..56b8fbc
--- /dev/null
@@ -0,0 +1,32 @@
+From 4f00130b70e5eee813cc7bc298e0f3fdf79673cc Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Fri, 29 Nov 2013 10:56:14 +0000
+Subject: arm64: Use Normal NonCacheable memory for writecombine
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit 4f00130b70e5eee813cc7bc298e0f3fdf79673cc upstream.
+
+This provides better performance compared to Device GRE and also allows
+unaligned accesses. Such memory is intended to be used with standard RAM
+(e.g. framebuffers) and not I/O.
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/pgtable.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -184,7 +184,7 @@ static inline void set_pte_at(struct mm_
+ #define pgprot_noncached(prot) \
+       __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+ #define pgprot_writecombine(prot) \
+-      __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
++      __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
+ #define pgprot_dmacoherent(prot) \
+       __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
+ #define __HAVE_PHYS_MEM_ACCESS_PROT
diff --git a/queue-3.10/arm64-virt-ensure-visibility-of-__boot_cpu_mode.patch b/queue-3.10/arm64-virt-ensure-visibility-of-__boot_cpu_mode.patch
new file mode 100644
index 0000000..4cd79d9
--- /dev/null
@@ -0,0 +1,66 @@
+From 82b2f495fba338d1e3098dde1df54944a9c19751 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Tue, 9 Jul 2013 15:16:06 +0100
+Subject: arm64: virt: ensure visibility of __boot_cpu_mode
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 82b2f495fba338d1e3098dde1df54944a9c19751 upstream.
+
+Secondary CPUs write to __boot_cpu_mode with caches disabled, and thus a
+cached value of __boot_cpu_mode may be incoherent with that in memory.
+This could lead to a failure to detect mismatched boot modes.
+
+This patch adds flushing to ensure that writes by secondaries to
+__boot_cpu_mode are made visible before we test against it.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Cc: Christoffer Dall <cdall@cs.columbia.edu>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/virt.h |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/arm64/include/asm/virt.h
++++ b/arch/arm64/include/asm/virt.h
+@@ -21,6 +21,7 @@
+ #define BOOT_CPU_MODE_EL2     (0x0e12b007)
+ #ifndef __ASSEMBLY__
++#include <asm/cacheflush.h>
+ /*
+  * __boot_cpu_mode records what mode CPUs were booted in.
+@@ -36,9 +37,20 @@ extern u32 __boot_cpu_mode[2];
+ void __hyp_set_vectors(phys_addr_t phys_vector_base);
+ phys_addr_t __hyp_get_vectors(void);
++static inline void sync_boot_mode(void)
++{
++      /*
++       * As secondaries write to __boot_cpu_mode with caches disabled, we
++       * must flush the corresponding cache entries to ensure the visibility
++       * of their writes.
++       */
++      __flush_dcache_area(__boot_cpu_mode, sizeof(__boot_cpu_mode));
++}
++
+ /* Reports the availability of HYP mode */
+ static inline bool is_hyp_mode_available(void)
+ {
++      sync_boot_mode();
+       return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
+               __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
+ }
+@@ -46,6 +58,7 @@ static inline bool is_hyp_mode_available
+ /* Check if the bootloader has booted CPUs in different modes */
+ static inline bool is_hyp_mode_mismatched(void)
+ {
++      sync_boot_mode();
+       return __boot_cpu_mode[0] != __boot_cpu_mode[1];
+ }
diff --git a/queue-3.10/clocksource-arch_timer-use-virtual-counters.patch b/queue-3.10/clocksource-arch_timer-use-virtual-counters.patch
new file mode 100644
index 0000000..f9cb7c1
--- /dev/null
@@ -0,0 +1,144 @@
+From 0d651e4e65e96989f72236bf83bd4c6e55eb6ce4 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 30 Jan 2013 17:51:26 +0000
+Subject: clocksource: arch_timer: use virtual counters
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 0d651e4e65e96989f72236bf83bd4c6e55eb6ce4 upstream.
+
+Switching between reading the virtual or physical counters is
+problematic, as some core code wants a view of time before we're fully
+set up. Using a function pointer and switching the source after the
+first read can make time appear to go backwards, and having a check in
+the read function is an unfortunate block on what we want to be a fast
+path.
+
+Instead, this patch makes us always use the virtual counters. If we're a
+guest, or don't have hyp mode, we'll use the virtual timers, and as such
+don't care about CNTVOFF as long as it doesn't change in such a way as
+to make time appear to travel backwards. As the guest will use the
+virtual timers, a (potential) KVM host must use the physical timers
+(which can wake up the host even if they fire while a guest is
+executing), and hence a host must have CNTVOFF set to zero so as to have
+a consistent view of time between the physical timers and virtual
+counters.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
+Cc: Rob Herring <rob.herring@calxeda.com>
+Cc: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/arch_timer.h    |    9 ---------
+ arch/arm64/include/asm/arch_timer.h  |   10 ----------
+ drivers/clocksource/arm_arch_timer.c |   23 +++++------------------
+ include/clocksource/arm_arch_timer.h |    2 +-
+ 4 files changed, 6 insertions(+), 38 deletions(-)
+
+--- a/arch/arm/include/asm/arch_timer.h
++++ b/arch/arm/include/asm/arch_timer.h
+@@ -80,15 +80,6 @@ static inline u32 arch_timer_get_cntfrq(
+       return val;
+ }
+-static inline u64 arch_counter_get_cntpct(void)
+-{
+-      u64 cval;
+-
+-      isb();
+-      asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
+-      return cval;
+-}
+-
+ static inline u64 arch_counter_get_cntvct(void)
+ {
+       u64 cval;
+--- a/arch/arm64/include/asm/arch_timer.h
++++ b/arch/arm64/include/asm/arch_timer.h
+@@ -110,16 +110,6 @@ static inline void __cpuinit arch_counte
+       asm volatile("msr       cntkctl_el1, %0" : : "r" (cntkctl));
+ }
+-static inline u64 arch_counter_get_cntpct(void)
+-{
+-      u64 cval;
+-
+-      isb();
+-      asm volatile("mrs %0, cntpct_el0" : "=r" (cval));
+-
+-      return cval;
+-}
+-
+ static inline u64 arch_counter_get_cntvct(void)
+ {
+       u64 cval;
+--- a/drivers/clocksource/arm_arch_timer.c
++++ b/drivers/clocksource/arm_arch_timer.c
+@@ -186,27 +186,19 @@ u32 arch_timer_get_rate(void)
+       return arch_timer_rate;
+ }
+-/*
+- * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
+- * call it before it has been initialised. Rather than incur a performance
+- * penalty checking for initialisation, provide a default implementation that
+- * won't lead to time appearing to jump backwards.
+- */
+-static u64 arch_timer_read_zero(void)
++u64 arch_timer_read_counter(void)
+ {
+-      return 0;
++      return arch_counter_get_cntvct();
+ }
+-u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;
+-
+ static cycle_t arch_counter_read(struct clocksource *cs)
+ {
+-      return arch_timer_read_counter();
++      return arch_counter_get_cntvct();
+ }
+ static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
+ {
+-      return arch_timer_read_counter();
++      return arch_counter_get_cntvct();
+ }
+ static struct clocksource clocksource_counter = {
+@@ -287,7 +279,7 @@ static int __init arch_timer_register(vo
+       cyclecounter.mult = clocksource_counter.mult;
+       cyclecounter.shift = clocksource_counter.shift;
+       timecounter_init(&timecounter, &cyclecounter,
+-                       arch_counter_get_cntpct());
++                       arch_counter_get_cntvct());
+       if (arch_timer_use_virtual) {
+               ppi = arch_timer_ppi[VIRT_PPI];
+@@ -376,11 +368,6 @@ static void __init arch_timer_init(struc
+               }
+       }
+-      if (arch_timer_use_virtual)
+-              arch_timer_read_counter = arch_counter_get_cntvct;
+-      else
+-              arch_timer_read_counter = arch_counter_get_cntpct;
+-
+       arch_timer_register();
+       arch_timer_arch_init();
+ }
+--- a/include/clocksource/arm_arch_timer.h
++++ b/include/clocksource/arm_arch_timer.h
+@@ -32,7 +32,7 @@
+ #ifdef CONFIG_ARM_ARCH_TIMER
+ extern u32 arch_timer_get_rate(void);
+-extern u64 (*arch_timer_read_counter)(void);
++extern u64 arch_timer_read_counter(void);
+ extern struct timecounter *arch_timer_get_timecounter(void);
+ #else
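
Why retargeting the read function pointer after the first read is risky can be shown with a small user-space sketch (the counter values and offset are made up): if the two sources differ by an offset such as CNTVOFF, switching between them can make the reported time jump backwards.

#include <stdint.h>
#include <stdio.h>

static uint64_t ticks = 1000;			/* underlying hardware counter          */
static const uint64_t cntvoff = 600;		/* virtual counter = physical - CNTVOFF */

static uint64_t read_phys(void) { return ticks; }
static uint64_t read_virt(void) { return ticks - cntvoff; }

/* Old scheme: reads go through a pointer that is retargeted during init. */
static uint64_t (*read_counter)(void) = read_phys;

int main(void)
{
	uint64_t t1 = read_counter();	/* early read: 1000 via the physical counter  */

	read_counter = read_virt;	/* timer init switches to the virtual counter */
	ticks += 100;			/* real time only moves forward               */

	uint64_t t2 = read_counter();	/* 500: time appears to have gone backwards   */
	printf("t1=%llu t2=%llu\n", (unsigned long long)t1, (unsigned long long)t2);
	/* Always reading the virtual counter through a direct call avoids both
	 * the discontinuity and the indirect call on the fast path. */
	return 0;
}
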
diff --git a/queue-3.10/series b/queue-3.10/series
index c242eabc928754b1f2a1259e42cffcb5047be291..9b7ec0f112a013f7e1e0be2739709dc8acef852c 100644
--- a/queue-3.10/series
+++ b/queue-3.10/series
@@ -116,3 +116,14 @@ rbd-make-rbd_obj_notify_ack-synchronous.patch
 rbd-fix-use-after-free-of-rbd_dev-disk.patch
 rbd-ignore-unmapped-snapshots-that-no-longer-exist.patch
 rbd-fix-error-handling-from-rbd_snap_name.patch
+arm64-only-enable-local-interrupts-after-the-cpu-is-marked-online.patch
+arm64-virt-ensure-visibility-of-__boot_cpu_mode.patch
+arm64-change-kernel-stack-size-to-16k.patch
+arm64-fix-possible-invalid-fpsimd-initialization-state.patch
+arm64-check-for-number-of-arguments-in-syscall_get-set_arguments.patch
+arm64-dts-reserve-the-memory-used-for-secondary-cpu-release-address.patch
+arm64-remove-unused-cpu_name-ascii-in-arch-arm64-mm-proc.s.patch
+clocksource-arch_timer-use-virtual-counters.patch
+arm64-avoid-cache-flushing-in-flush_dcache_page.patch
+arm64-do-not-flush-the-d-cache-for-anonymous-pages.patch
+arm64-use-normal-noncacheable-memory-for-writecombine.patch