git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.10-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 3 Feb 2015 03:40:50 +0000 (19:40 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 3 Feb 2015 03:40:50 +0000 (19:40 -0800)
added patches:
arm-7829-1-add-.text.unlikely-and-.text.hot-to-arm-unwind-tables.patch
arm-7866-1-include-asm-use-long-long-instead-of-u64-within-atomic.h.patch
arm-7867-1-include-asm-use-int-instead-of-unsigned-long-for-oldval-in-atomic_cmpxchg.patch
arm-7931-1-correct-virt_addr_valid.patch
arm-8108-1-mm-introduce-pte-pmd-_isset-and.patch
arm-8109-1-mm-modify-pte_write-and-pmd_write-logic-for-lpae.patch
arm-dma-ensure-that-old-section-mappings-are-flushed-from-the-tlb.patch
arm-fix-asm-memory.h-build-error.patch
arm-fix-type-of-phys_pfn_offset-to-unsigned-long.patch
arm-lpae-fix-definition-of-pte_hwtable_ptrs.patch
arm-lpae-use-phys_addr_t-in-alloc_init_pud.patch
arm-lpae-use-signed-arithmetic-for-mask-definitions.patch
arm-mm-correct-pte_same-behaviour-for-lpae.patch

14 files changed:
queue-3.10/arm-7829-1-add-.text.unlikely-and-.text.hot-to-arm-unwind-tables.patch [new file with mode: 0644]
queue-3.10/arm-7866-1-include-asm-use-long-long-instead-of-u64-within-atomic.h.patch [new file with mode: 0644]
queue-3.10/arm-7867-1-include-asm-use-int-instead-of-unsigned-long-for-oldval-in-atomic_cmpxchg.patch [new file with mode: 0644]
queue-3.10/arm-7931-1-correct-virt_addr_valid.patch [new file with mode: 0644]
queue-3.10/arm-8108-1-mm-introduce-pte-pmd-_isset-and.patch [new file with mode: 0644]
queue-3.10/arm-8109-1-mm-modify-pte_write-and-pmd_write-logic-for-lpae.patch [new file with mode: 0644]
queue-3.10/arm-dma-ensure-that-old-section-mappings-are-flushed-from-the-tlb.patch [new file with mode: 0644]
queue-3.10/arm-fix-asm-memory.h-build-error.patch [new file with mode: 0644]
queue-3.10/arm-fix-type-of-phys_pfn_offset-to-unsigned-long.patch [new file with mode: 0644]
queue-3.10/arm-lpae-fix-definition-of-pte_hwtable_ptrs.patch [new file with mode: 0644]
queue-3.10/arm-lpae-use-phys_addr_t-in-alloc_init_pud.patch [new file with mode: 0644]
queue-3.10/arm-lpae-use-signed-arithmetic-for-mask-definitions.patch [new file with mode: 0644]
queue-3.10/arm-mm-correct-pte_same-behaviour-for-lpae.patch [new file with mode: 0644]
queue-3.10/series

diff --git a/queue-3.10/arm-7829-1-add-.text.unlikely-and-.text.hot-to-arm-unwind-tables.patch b/queue-3.10/arm-7829-1-add-.text.unlikely-and-.text.hot-to-arm-unwind-tables.patch
new file mode 100644
index 0000000..55e3ce4
--- /dev/null
@@ -0,0 +1,68 @@
+From 849b882b52df0f276d9ffded01d85654aa0da422 Mon Sep 17 00:00:00 2001
+From: Douglas Anderson <dianders@chromium.org>
+Date: Thu, 29 Aug 2013 00:08:01 +0100
+Subject: ARM: 7829/1: Add ".text.unlikely" and ".text.hot" to arm unwind tables
+
+From: Douglas Anderson <dianders@chromium.org>
+
+commit 849b882b52df0f276d9ffded01d85654aa0da422 upstream.
+
+It appears that gcc may put some code in ".text.unlikely" or
+".text.hot" sections.  Right now those aren't accounted for in unwind
+tables.  Add them.
+
+I found some docs about this at:
+  http://gcc.gnu.org/onlinedocs/gcc-4.6.2/gcc.pdf
+
+Without this, if you have slub_debug turned on, you can get messages
+that look like this:
+  unwind: Index not found 7f008c50
+
+Signed-off-by: Doug Anderson <dianders@chromium.org>
+Acked-by: Mike Frysinger <vapier@gentoo.org>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+[wangkai: backport to 3.10
+       - adjust context
+]
+Signed-off-by: Wang Kai <morgan.wang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/module.h |    2 ++
+ arch/arm/kernel/module.c      |    8 ++++++++
+ 2 files changed, 10 insertions(+)
+
+--- a/arch/arm/include/asm/module.h
++++ b/arch/arm/include/asm/module.h
+@@ -12,6 +12,8 @@ enum {
+       ARM_SEC_CORE,
+       ARM_SEC_EXIT,
+       ARM_SEC_DEVEXIT,
++      ARM_SEC_HOT,
++      ARM_SEC_UNLIKELY,
+       ARM_SEC_MAX,
+ };
+--- a/arch/arm/kernel/module.c
++++ b/arch/arm/kernel/module.c
+@@ -296,6 +296,10 @@ int module_finalize(const Elf32_Ehdr *hd
+                       maps[ARM_SEC_EXIT].unw_sec = s;
+               else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
+                       maps[ARM_SEC_DEVEXIT].unw_sec = s;
++              else if (strcmp(".ARM.exidx.text.unlikely", secname) == 0)
++                      maps[ARM_SEC_UNLIKELY].unw_sec = s;
++              else if (strcmp(".ARM.exidx.text.hot", secname) == 0)
++                      maps[ARM_SEC_HOT].unw_sec = s;
+               else if (strcmp(".init.text", secname) == 0)
+                       maps[ARM_SEC_INIT].txt_sec = s;
+               else if (strcmp(".devinit.text", secname) == 0)
+@@ -306,6 +310,10 @@ int module_finalize(const Elf32_Ehdr *hd
+                       maps[ARM_SEC_EXIT].txt_sec = s;
+               else if (strcmp(".devexit.text", secname) == 0)
+                       maps[ARM_SEC_DEVEXIT].txt_sec = s;
++              else if (strcmp(".text.unlikely", secname) == 0)
++                      maps[ARM_SEC_UNLIKELY].txt_sec = s;
++              else if (strcmp(".text.hot", secname) == 0)
++                      maps[ARM_SEC_HOT].txt_sec = s;
+       }
+       for (i = 0; i < ARM_SEC_MAX; i++)
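
Where these sections come from: GCC routes functions marked with the cold attribute into .text.unlikely and hot-marked functions into .text.hot, so a module containing either carries the matching .ARM.exidx.* unwind sections handled above. A minimal user-space sketch (illustrative only, not part of the patch; exact placement is compiler- and flag-dependent):

/* hedged sketch: how code ends up in .text.unlikely / .text.hot */
#include <stdio.h>

__attribute__((cold)) static void rare_error_path(void)  /* -> .text.unlikely */
{
        fprintf(stderr, "unlikely path\n");
}

__attribute__((hot)) static int fast_path(int x)          /* -> .text.hot */
{
        return x * 2;
}

int main(void)
{
        if (fast_path(21) != 42)
                rare_error_path();
        return 0;       /* inspect with: objdump -t a.out | grep '.text.' */
}
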
diff --git a/queue-3.10/arm-7866-1-include-asm-use-long-long-instead-of-u64-within-atomic.h.patch b/queue-3.10/arm-7866-1-include-asm-use-long-long-instead-of-u64-within-atomic.h.patch
new file mode 100644
index 0000000..341f645
--- /dev/null
@@ -0,0 +1,184 @@
+From 237f12337cfa2175474e4dd015bc07a25eb9080d Mon Sep 17 00:00:00 2001
+From: Chen Gang <gang.chen@asianux.com>
+Date: Sat, 26 Oct 2013 15:07:04 +0100
+Subject: ARM: 7866/1: include: asm: use 'long long' instead of 'u64' within atomic.h
+
+From: Chen Gang <gang.chen@asianux.com>
+
+commit 237f12337cfa2175474e4dd015bc07a25eb9080d upstream.
+
+atomic* values are signed, and the atomic* functions must also handle
+signed values (both parameters and return values), so 32-bit ARM needs
+to use 'long long' instead of 'u64'.
+
+The replacement also fixes a bug in atomic64_add_negative():
+"u64 is never less than 0".
+
+The modifications are:
+
+  in vim, use "1,% s/\<u64\>/long long/g" command.
+  remove '__aligned(8)' which is useless for 64-bit.
+  be sure of 80 column limitation after replacement.
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Chen Gang <gang.chen@asianux.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Cc: Hou Pengyang <houpengyang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/atomic.h |   49 +++++++++++++++++++++---------------------
+ 1 file changed, 25 insertions(+), 24 deletions(-)
+
+--- a/arch/arm/include/asm/atomic.h
++++ b/arch/arm/include/asm/atomic.h
+@@ -238,15 +238,15 @@ static inline int __atomic_add_unless(at
+ #ifndef CONFIG_GENERIC_ATOMIC64
+ typedef struct {
+-      u64 __aligned(8) counter;
++      long long counter;
+ } atomic64_t;
+ #define ATOMIC64_INIT(i) { (i) }
+ #ifdef CONFIG_ARM_LPAE
+-static inline u64 atomic64_read(const atomic64_t *v)
++static inline long long atomic64_read(const atomic64_t *v)
+ {
+-      u64 result;
++      long long result;
+       __asm__ __volatile__("@ atomic64_read\n"
+ "     ldrd    %0, %H0, [%1]"
+@@ -257,7 +257,7 @@ static inline u64 atomic64_read(const at
+       return result;
+ }
+-static inline void atomic64_set(atomic64_t *v, u64 i)
++static inline void atomic64_set(atomic64_t *v, long long i)
+ {
+       __asm__ __volatile__("@ atomic64_set\n"
+ "     strd    %2, %H2, [%1]"
+@@ -266,9 +266,9 @@ static inline void atomic64_set(atomic64
+       );
+ }
+ #else
+-static inline u64 atomic64_read(const atomic64_t *v)
++static inline long long atomic64_read(const atomic64_t *v)
+ {
+-      u64 result;
++      long long result;
+       __asm__ __volatile__("@ atomic64_read\n"
+ "     ldrexd  %0, %H0, [%1]"
+@@ -279,9 +279,9 @@ static inline u64 atomic64_read(const at
+       return result;
+ }
+-static inline void atomic64_set(atomic64_t *v, u64 i)
++static inline void atomic64_set(atomic64_t *v, long long i)
+ {
+-      u64 tmp;
++      long long tmp;
+       __asm__ __volatile__("@ atomic64_set\n"
+ "1:   ldrexd  %0, %H0, [%2]\n"
+@@ -294,9 +294,9 @@ static inline void atomic64_set(atomic64
+ }
+ #endif
+-static inline void atomic64_add(u64 i, atomic64_t *v)
++static inline void atomic64_add(long long i, atomic64_t *v)
+ {
+-      u64 result;
++      long long result;
+       unsigned long tmp;
+       __asm__ __volatile__("@ atomic64_add\n"
+@@ -311,9 +311,9 @@ static inline void atomic64_add(u64 i, a
+       : "cc");
+ }
+-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
++static inline long long atomic64_add_return(long long i, atomic64_t *v)
+ {
+-      u64 result;
++      long long result;
+       unsigned long tmp;
+       smp_mb();
+@@ -334,9 +334,9 @@ static inline u64 atomic64_add_return(u6
+       return result;
+ }
+-static inline void atomic64_sub(u64 i, atomic64_t *v)
++static inline void atomic64_sub(long long i, atomic64_t *v)
+ {
+-      u64 result;
++      long long result;
+       unsigned long tmp;
+       __asm__ __volatile__("@ atomic64_sub\n"
+@@ -351,9 +351,9 @@ static inline void atomic64_sub(u64 i, a
+       : "cc");
+ }
+-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
++static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+ {
+-      u64 result;
++      long long result;
+       unsigned long tmp;
+       smp_mb();
+@@ -374,9 +374,10 @@ static inline u64 atomic64_sub_return(u6
+       return result;
+ }
+-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
++static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
++                                      long long new)
+ {
+-      u64 oldval;
++      long long oldval;
+       unsigned long res;
+       smp_mb();
+@@ -398,9 +399,9 @@ static inline u64 atomic64_cmpxchg(atomi
+       return oldval;
+ }
+-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
++static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+ {
+-      u64 result;
++      long long result;
+       unsigned long tmp;
+       smp_mb();
+@@ -419,9 +420,9 @@ static inline u64 atomic64_xchg(atomic64
+       return result;
+ }
+-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
++static inline long long atomic64_dec_if_positive(atomic64_t *v)
+ {
+-      u64 result;
++      long long result;
+       unsigned long tmp;
+       smp_mb();
+@@ -445,9 +446,9 @@ static inline u64 atomic64_dec_if_positi
+       return result;
+ }
+-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
++static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+ {
+-      u64 val;
++      long long val;
+       unsigned long tmp;
+       int ret = 1;
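
The "u64 is never less than 0" bug mentioned above is easy to reproduce in isolation. A hedged user-space sketch (types and helper names are stand-ins, not the kernel's):

#include <stdio.h>

typedef unsigned long long u64_demo;    /* stand-in for the old u64 counter */

static int add_negative_u64(u64_demo i, u64_demo v)
{
        return v + i < 0;       /* unsigned: always false, flag never set */
}

static int add_negative_ll(long long i, long long v)
{
        return v + i < 0;       /* signed: works as intended */
}

int main(void)
{
        printf("u64:       %d\n", add_negative_u64(-5, 2)); /* 0 (wrong) */
        printf("long long: %d\n", add_negative_ll(-5, 2));  /* 1 (right) */
        return 0;
}
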
diff --git a/queue-3.10/arm-7867-1-include-asm-use-int-instead-of-unsigned-long-for-oldval-in-atomic_cmpxchg.patch b/queue-3.10/arm-7867-1-include-asm-use-int-instead-of-unsigned-long-for-oldval-in-atomic_cmpxchg.patch
new file mode 100644
index 0000000..ea43aa6
--- /dev/null
@@ -0,0 +1,36 @@
+From 4dcc1cf7316a26e112f5c9fcca531ff98ef44700 Mon Sep 17 00:00:00 2001
+From: Chen Gang <gang.chen@asianux.com>
+Date: Sat, 26 Oct 2013 15:07:25 +0100
+Subject: ARM: 7867/1: include: asm: use 'int' instead of 'unsigned long' for 'oldval' in atomic_cmpxchg().
+
+From: Chen Gang <gang.chen@asianux.com>
+
+commit 4dcc1cf7316a26e112f5c9fcca531ff98ef44700 upstream.
+
+For atomic_cmpxchg(), the type of 'oldval' needs to be 'int' to match
+the type of "*ptr" (used by the 'ldrex' instruction) and 'old' (used by
+the 'teq' instruction).
+
+Reviewed-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Chen Gang <gang.chen@asianux.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Cc: Hou Pengyang <houpengyang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/atomic.h |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/atomic.h
++++ b/arch/arm/include/asm/atomic.h
+@@ -114,7 +114,8 @@ static inline int atomic_sub_return(int
+ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+ {
+-      unsigned long oldval, res;
++      int oldval;
++      unsigned long res;
+       smp_mb();
diff --git a/queue-3.10/arm-7931-1-correct-virt_addr_valid.patch b/queue-3.10/arm-7931-1-correct-virt_addr_valid.patch
new file mode 100644
index 0000000..45646ae
--- /dev/null
@@ -0,0 +1,44 @@
+From efea3403d4b7c6d1dd5d5ac3234c161e8b314d66 Mon Sep 17 00:00:00 2001
+From: Laura Abbott <lauraa@codeaurora.org>
+Date: Sat, 21 Dec 2013 01:03:06 +0100
+Subject: ARM: 7931/1: Correct virt_addr_valid
+
+From: Laura Abbott <lauraa@codeaurora.org>
+
+commit efea3403d4b7c6d1dd5d5ac3234c161e8b314d66 upstream.
+
+The definition of virt_addr_valid is that virt_addr_valid should
+return true if and only if virt_to_page returns a valid pointer.
+The current definition of virt_addr_valid only checks against the
+virtual address range. There's no guarantee that just because a
+virtual address falls between PAGE_OFFSET and high_memory the
+associated physical memory has a valid backing struct page. Follow
+the example of other architectures and convert to pfn_valid to
+verify that the virtual address is actually valid. The check for
+an address between PAGE_OFFSET and high_memory is still necessary
+as vmalloc/highmem addresses are not valid with virt_to_page.
+
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Nicolas Pitre <nico@linaro.org>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Cc: Hou Pengyang <houpengyang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/memory.h |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/memory.h
++++ b/arch/arm/include/asm/memory.h
+@@ -274,7 +274,8 @@ static inline __deprecated void *bus_to_
+ #define ARCH_PFN_OFFSET               PHYS_PFN_OFFSET
+ #define virt_to_page(kaddr)   pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+-#define virt_addr_valid(kaddr)        ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
++#define virt_addr_valid(kaddr)        (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
++                                      && pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
+ #endif
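
A toy model of the strengthened check (illustrative user-space sketch; PHYS_OFFSET is assumed to be 0 here and all constants are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* lowmem spans PAGE_OFFSET..high_memory, but a hole in RAM means some
 * pfns inside that window have no backing struct page */
#define PAGE_SHIFT_DEMO  12
#define PAGE_OFFSET_DEMO 0xC0000000UL
static const uintptr_t high_memory_demo = 0xEF800000UL;

static bool pfn_valid_demo(uintptr_t pfn)
{
        return !(pfn >= 0x10000 && pfn < 0x11000);  /* pretend hole in RAM */
}

static bool virt_addr_valid_demo(uintptr_t kaddr)
{
        if (kaddr < PAGE_OFFSET_DEMO || kaddr >= high_memory_demo)
                return false;   /* the old check stopped here */
        /* the fix: the backing physical page must actually exist */
        return pfn_valid_demo((kaddr - PAGE_OFFSET_DEMO) >> PAGE_SHIFT_DEMO);
}

int main(void)
{
        printf("%d\n", virt_addr_valid_demo(0xC0100000UL)); /* 1: backed lowmem */
        printf("%d\n", virt_addr_valid_demo(0xD0000000UL)); /* 0: lowmem hole  */
        printf("%d\n", virt_addr_valid_demo(0x00001000UL)); /* 0: not lowmem   */
        return 0;
}
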
diff --git a/queue-3.10/arm-8108-1-mm-introduce-pte-pmd-_isset-and.patch b/queue-3.10/arm-8108-1-mm-introduce-pte-pmd-_isset-and.patch
new file mode 100644
index 0000000..8e368cc
--- /dev/null
@@ -0,0 +1,64 @@
+From f2950706871c4b6e8c0f0d7c3f62d35930b8de63 Mon Sep 17 00:00:00 2001
+From: Steven Capper <steve.capper@linaro.org>
+Date: Fri, 18 Jul 2014 16:15:27 +0100
+Subject: ARM: 8108/1: mm: Introduce {pte,pmd}_isset and
+ {pte,pmd}_isclear
+
+From: Steven Capper <steve.capper@linaro.org>
+
+commit f2950706871c4b6e8c0f0d7c3f62d35930b8de63 upstream.
+
+Long descriptors on ARM are 64 bits, and some pte functions such as
+pte_dirty return a bitwise-and of a flag with the pte value. If the
+flag to be tested resides in the upper 32 bits of the pte, then we run
+into the danger of the result being dropped if downcast.
+
+For example:
+       gather_stats(page, md, pte_dirty(*pte), 1);
+where pte_dirty(*pte) is downcast to an int.
+
+This patch introduces a new macro pte_isset which performs the bitwise
+and, then performs a double logical invert (where needed) to ensure
+predictable downcasting. The logical inverse pte_isclear is also
+introduced.
+
+Equivalent pmd functions for Transparent HugePages have also been
+added.
+
+Signed-off-by: Steve Capper <steve.capper@linaro.org>
+Reviewed-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+[hpy: Backported to 3.10:
+ - adjust the context
+ - ignore change to pmd, because 3.10 does not support HugePage.]
+Signed-off-by: Hou Pengyang <houpengyang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/pgtable.h |   14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -211,12 +211,16 @@ static inline pte_t *pmd_page_vaddr(pmd_
+ #define pte_clear(mm,addr,ptep)       set_pte_ext(ptep, __pte(0), 0)
++#define pte_isset(pte, val)   ((u32)(val) == (val) ? pte_val(pte) & (val) \
++                                              : !!(pte_val(pte) & (val)))
++#define pte_isclear(pte, val) (!(pte_val(pte) & (val)))
++
+ #define pte_none(pte)         (!pte_val(pte))
+-#define pte_present(pte)      (pte_val(pte) & L_PTE_PRESENT)
+-#define pte_write(pte)                (!(pte_val(pte) & L_PTE_RDONLY))
+-#define pte_dirty(pte)                (pte_val(pte) & L_PTE_DIRTY)
+-#define pte_young(pte)                (pte_val(pte) & L_PTE_YOUNG)
+-#define pte_exec(pte)         (!(pte_val(pte) & L_PTE_XN))
++#define pte_present(pte)      (pte_isset((pte), L_PTE_PRESENT))
++#define pte_write(pte)                (pte_isclear((pte), L_PTE_RDONLY))
++#define pte_dirty(pte)                (pte_isset((pte), L_PTE_DIRTY))
++#define pte_young(pte)                (pte_isset((pte), L_PTE_YOUNG))
++#define pte_exec(pte)         (pte_isclear((pte), L_PTE_XN))
+ #define pte_special(pte)      (0)
+ #define pte_present_user(pte)  (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
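
The downcast hazard that pte_isset() guards against can be shown in a few lines. A hedged user-space sketch (names are illustrative; conversion of an out-of-range value to int is implementation-defined, but truncates on common ABIs):

#include <stdint.h>
#include <stdio.h>

#define L_PTE_DIRTY_DEMO (1ULL << 55)   /* LPAE software bit above bit 31 */

int main(void)
{
        uint64_t pte = L_PTE_DIRTY_DEMO;

        int broken = pte & L_PTE_DIRTY_DEMO;     /* narrows to 0: flag lost */
        int fixed  = !!(pte & L_PTE_DIRTY_DEMO); /* double negation pins to 0/1 */
        printf("broken=%d fixed=%d\n", broken, fixed);
        return 0;
}
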
diff --git a/queue-3.10/arm-8109-1-mm-modify-pte_write-and-pmd_write-logic-for-lpae.patch b/queue-3.10/arm-8109-1-mm-modify-pte_write-and-pmd_write-logic-for-lpae.patch
new file mode 100644
index 0000000..6112f19
--- /dev/null
@@ -0,0 +1,97 @@
+From ded9477984690d026e46dd75e8157392cea3f13f Mon Sep 17 00:00:00 2001
+From: Steven Capper <steve.capper@linaro.org>
+Date: Fri, 18 Jul 2014 16:16:15 +0100
+Subject: ARM: 8109/1: mm: Modify pte_write and pmd_write logic for LPAE
+
+From: Steven Capper <steve.capper@linaro.org>
+
+commit ded9477984690d026e46dd75e8157392cea3f13f upstream.
+
+For LPAE, we have the following means for encoding writable or dirty
+ptes:
+                              L_PTE_DIRTY       L_PTE_RDONLY
+    !pte_dirty && !pte_write        0               1
+    !pte_dirty && pte_write         0               1
+    pte_dirty && !pte_write         1               1
+    pte_dirty && pte_write          1               0
+
+So we can't distinguish between writeable clean ptes and read only
+ptes. This can cause problems with ptes being incorrectly flagged as
+read only when they are writeable but not dirty.
+
+This patch renumbers L_PTE_RDONLY from AP[2] to a software bit #58,
+and adds additional logic to set AP[2] whenever the pte is read only
+or not dirty. That way we can distinguish between clean writeable ptes
+and read only ptes.
+
+HugeTLB pages will use this new logic automatically.
+
+We need to add some logic to Transparent HugePages to ensure that they
+correctly interpret the revised pgprot permissions (L_PTE_RDONLY has
+moved and no longer matches PMD_SECT_AP2). In the process of revising
+THP, the names of the PMD software bits have been prefixed with L_ to
+make them easier to distinguish from their hardware bit counterparts.
+
+
+Signed-off-by: Steve Capper <steve.capper@linaro.org>
+Reviewed-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+[hpy: Backported to 3.10
+ - adjust the context
+ - ignore change related to pmd, because 3.10 does not support HugePage ]
+Signed-off-by: Hou Pengyang <houpengyang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/pgtable-3level-hwdef.h |    1 +
+ arch/arm/include/asm/pgtable-3level.h       |    6 +++---
+ arch/arm/mm/proc-v7-3level.S                |    9 +++++++--
+ 3 files changed, 11 insertions(+), 5 deletions(-)
+
+--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
++++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
+@@ -68,6 +68,7 @@
+ #define PTE_TYPE_PAGE         (_AT(pteval_t, 3) << 0)
+ #define PTE_BUFFERABLE                (_AT(pteval_t, 1) << 2)         /* AttrIndx[0] */
+ #define PTE_CACHEABLE         (_AT(pteval_t, 1) << 3)         /* AttrIndx[1] */
++#define PTE_AP2                       (_AT(pteval_t, 1) << 7)         /* AP[2] */
+ #define PTE_EXT_SHARED                (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
+ #define PTE_EXT_AF            (_AT(pteval_t, 1) << 10)        /* Access Flag */
+ #define PTE_EXT_NG            (_AT(pteval_t, 1) << 11)        /* nG */
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -71,13 +71,13 @@
+ #define L_PTE_PRESENT         (_AT(pteval_t, 3) << 0)         /* Present */
+ #define L_PTE_FILE            (_AT(pteval_t, 1) << 2)         /* only when !PRESENT */
+ #define L_PTE_USER            (_AT(pteval_t, 1) << 6)         /* AP[1] */
+-#define L_PTE_RDONLY          (_AT(pteval_t, 1) << 7)         /* AP[2] */
+ #define L_PTE_SHARED          (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
+ #define L_PTE_YOUNG           (_AT(pteval_t, 1) << 10)        /* AF */
+ #define L_PTE_XN              (_AT(pteval_t, 1) << 54)        /* XN */
+-#define L_PTE_DIRTY           (_AT(pteval_t, 1) << 55)        /* unused */
+-#define L_PTE_SPECIAL         (_AT(pteval_t, 1) << 56)        /* unused */
++#define L_PTE_DIRTY           (_AT(pteval_t, 1) << 55)
++#define L_PTE_SPECIAL         (_AT(pteval_t, 1) << 56)
+ #define L_PTE_NONE            (_AT(pteval_t, 1) << 57)        /* PROT_NONE */
++#define L_PTE_RDONLY          (_AT(pteval_t, 1) << 58)        /* READ ONLY */
+ /*
+  * To be used in assembly code with the upper page attributes.
+--- a/arch/arm/mm/proc-v7-3level.S
++++ b/arch/arm/mm/proc-v7-3level.S
+@@ -78,8 +78,13 @@ ENTRY(cpu_v7_set_pte_ext)
+       tst     rh, #1 << (57 - 32)             @ L_PTE_NONE
+       bicne   rl, #L_PTE_VALID
+       bne     1f
+-      tst     rh, #1 << (55 - 32)             @ L_PTE_DIRTY
+-      orreq   rl, #L_PTE_RDONLY
++
++      eor     ip, rh, #1 << (55 - 32) @ toggle L_PTE_DIRTY in temp reg to
++                                      @ test for !L_PTE_DIRTY || L_PTE_RDONLY
++      tst     ip, #1 << (55 - 32) | 1 << (58 - 32)
++      orrne   rl, #PTE_AP2
++      biceq   rl, #PTE_AP2
++
+ 1:    strd    r2, r3, [r0]
+       ALT_SMP(W(nop))
+       ALT_UP (mcr     p15, 0, r0, c7, c10, 1)         @ flush_pte
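
The assembly above implements a simple rule: hardware AP[2] (read-only) is set whenever the pte is read-only or not yet dirty, so a clean writable pte still faults on first write and can then be marked dirty. A hedged C model of that truth table (illustrative only):

#include <stdbool.h>
#include <stdio.h>

static bool hw_readonly(bool l_pte_dirty, bool l_pte_rdonly)
{
        return l_pte_rdonly || !l_pte_dirty;    /* AP[2] */
}

int main(void)
{
        /* mirrors the encoding table in the commit message */
        printf("!dirty !write -> AP2=%d\n", hw_readonly(false, true));  /* 1 */
        printf("!dirty  write -> AP2=%d\n", hw_readonly(false, false)); /* 1 */
        printf(" dirty !write -> AP2=%d\n", hw_readonly(true,  true));  /* 1 */
        printf(" dirty  write -> AP2=%d\n", hw_readonly(true,  false)); /* 0 */
        return 0;
}
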
diff --git a/queue-3.10/arm-dma-ensure-that-old-section-mappings-are-flushed-from-the-tlb.patch b/queue-3.10/arm-dma-ensure-that-old-section-mappings-are-flushed-from-the-tlb.patch
new file mode 100644
index 0000000..658f068
--- /dev/null
@@ -0,0 +1,53 @@
+From 6b076991dca9817e75c37e2f0db6d52611ea42fa Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Thu, 17 Jul 2014 12:17:45 +0100
+Subject: ARM: DMA: ensure that old section mappings are flushed from the TLB
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit 6b076991dca9817e75c37e2f0db6d52611ea42fa upstream.
+
+When setting up the CMA region, we must ensure that the old section
+mappings are flushed from the TLB before replacing them with page
+tables, otherwise we can suffer from mismatched aliases if the CPU
+speculatively prefetches from these mappings at an inopportune time.
+
+A mismatched alias can occur when the TLB contains a section mapping,
+but a subsequent prefetch causes it to load a page table mapping,
+resulting in the possibility of the TLB containing two matching
+mappings for the same virtual address region.
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Cc: Hou Pengyang <houpengyang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mm/dma-mapping.c |   11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -429,12 +429,21 @@ void __init dma_contiguous_remap(void)
+               map.type = MT_MEMORY_DMA_READY;
+               /*
+-               * Clear previous low-memory mapping
++               * Clear previous low-memory mapping to ensure that the
++               * TLB does not see any conflicting entries, then flush
++               * the TLB of the old entries before creating new mappings.
++               *
++               * This ensures that any speculatively loaded TLB entries
++               * (even though they may be rare) can not cause any problems,
++               * and ensures that this code is architecturally compliant.
+                */
+               for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
+                    addr += PMD_SIZE)
+                       pmd_clear(pmd_off_k(addr));
++              flush_tlb_kernel_range(__phys_to_virt(start),
++                                     __phys_to_virt(end));
++
+               iotable_init(&map, 1);
+       }
+ }
diff --git a/queue-3.10/arm-fix-asm-memory.h-build-error.patch b/queue-3.10/arm-fix-asm-memory.h-build-error.patch
new file mode 100644
index 0000000..76aea45
--- /dev/null
@@ -0,0 +1,117 @@
+From b713aa0b15015a65ad5421543b80df86de043d62 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Tue, 10 Dec 2013 19:21:08 +0000
+Subject: ARM: fix asm/memory.h build error
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit b713aa0b15015a65ad5421543b80df86de043d62 upstream.
+
+Jason Gunthorpe reports a build failure when ARM_PATCH_PHYS_VIRT is
+not defined:
+
+In file included from arch/arm/include/asm/page.h:163:0,
+                 from include/linux/mm_types.h:16,
+                 from include/linux/sched.h:24,
+                 from arch/arm/kernel/asm-offsets.c:13:
+arch/arm/include/asm/memory.h: In function '__virt_to_phys':
+arch/arm/include/asm/memory.h:244:40: error: 'PHYS_OFFSET' undeclared (first use in this function)
+arch/arm/include/asm/memory.h:244:40: note: each undeclared identifier is reported only once for each function it appears in
+arch/arm/include/asm/memory.h: In function '__phys_to_virt':
+arch/arm/include/asm/memory.h:249:13: error: 'PHYS_OFFSET' undeclared (first use in this function)
+
+Fixes: ca5a45c06cd4 ("ARM: mm: use phys_addr_t appropriately in p2v and v2p conversions")
+Tested-By: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+[hpy: Backported to 3.10:
+ - adjust the context
+ - MPU is not supported by 3.10, so ignore fix to MPU compared with the original patch.]
+Signed-off-by: Hou Pengyang <houpengyang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/memory.h |   31 +++++++++++++++----------------
+ arch/arm/kernel/head.S        |    2 +-
+ 2 files changed, 16 insertions(+), 17 deletions(-)
+
+--- a/arch/arm/include/asm/memory.h
++++ b/arch/arm/include/asm/memory.h
+@@ -98,23 +98,19 @@
+ #define TASK_UNMAPPED_BASE    UL(0x00000000)
+ #endif
+-#ifndef PHYS_OFFSET
+-#define PHYS_OFFSET           UL(CONFIG_DRAM_BASE)
+-#endif
+-
+ #ifndef END_MEM
+ #define END_MEM               (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
+ #endif
+ #ifndef PAGE_OFFSET
+-#define PAGE_OFFSET           (PHYS_OFFSET)
++#define PAGE_OFFSET           PLAT_PHYS_OFFSET
+ #endif
+ /*
+  * The module can be at any place in ram in nommu mode.
+  */
+ #define MODULES_END           (END_MEM)
+-#define MODULES_VADDR         (PHYS_OFFSET)
++#define MODULES_VADDR         PAGE_OFFSET
+ #define XIP_VIRT_ADDR(physaddr)  (physaddr)
+@@ -141,6 +137,16 @@
+ #define page_to_phys(page)    (__pfn_to_phys(page_to_pfn(page)))
+ #define phys_to_page(phys)    (pfn_to_page(__phys_to_pfn(phys)))
++/*
++ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
++ * memory.  This is used for XIP and NoMMU kernels, or by kernels which
++ * have their own mach/memory.h.  Assembly code must always use
++ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
++ */
++#ifndef PLAT_PHYS_OFFSET
++#define PLAT_PHYS_OFFSET      UL(CONFIG_PHYS_OFFSET)
++#endif
++
+ #ifndef __ASSEMBLY__
+ /*
+@@ -183,22 +189,15 @@ static inline unsigned long __phys_to_vi
+       return t;
+ }
+ #else
++
++#define PHYS_OFFSET   PLAT_PHYS_OFFSET
++
+ #define __virt_to_phys(x)     ((x) - PAGE_OFFSET + PHYS_OFFSET)
+ #define __phys_to_virt(x)     ((x) - PHYS_OFFSET + PAGE_OFFSET)
+-#endif
+-#endif
+-#endif /* __ASSEMBLY__ */
+-#ifndef PHYS_OFFSET
+-#ifdef PLAT_PHYS_OFFSET
+-#define PHYS_OFFSET   PLAT_PHYS_OFFSET
+-#else
+-#define PHYS_OFFSET   UL(CONFIG_PHYS_OFFSET)
+ #endif
+ #endif
+-#ifndef __ASSEMBLY__
+-
+ /*
+  * PFNs are used to describe any physical page; this means
+  * PFN 0 == physical address 0.
+--- a/arch/arm/kernel/head.S
++++ b/arch/arm/kernel/head.S
+@@ -109,7 +109,7 @@ ENTRY(stext)
+       sub     r4, r3, r4                      @ (PHYS_OFFSET - PAGE_OFFSET)
+       add     r8, r8, r4                      @ PHYS_OFFSET
+ #else
+-      ldr     r8, =PHYS_OFFSET                @ always constant in this case
++      ldr     r8, =PLAT_PHYS_OFFSET           @ always constant in this case
+ #endif
+       /*
diff --git a/queue-3.10/arm-fix-type-of-phys_pfn_offset-to-unsigned-long.patch b/queue-3.10/arm-fix-type-of-phys_pfn_offset-to-unsigned-long.patch
new file mode 100644
index 0000000..14e83a1
--- /dev/null
@@ -0,0 +1,41 @@
+From 5b20c5b2f014ecc0a6310988af69cd7ede9e7c67 Mon Sep 17 00:00:00 2001
+From: Cyril Chemparathy <cyril@ti.com>
+Date: Wed, 12 Sep 2012 10:19:05 -0400
+Subject: ARM: fix type of PHYS_PFN_OFFSET to unsigned long
+
+From: Cyril Chemparathy <cyril@ti.com>
+
+commit 5b20c5b2f014ecc0a6310988af69cd7ede9e7c67 upstream.
+
+On LPAE machines, PHYS_OFFSET evaluates to a phys_addr_t and this type is
+inherited by the PHYS_PFN_OFFSET definition as well.  Consequently, the kernel
+build emits warnings of the form:
+
+init/main.c: In function 'start_kernel':
+init/main.c:588:7: warning: format '%lx' expects argument of type 'long unsigned int', but argument 2 has type 'phys_addr_t' [-Wformat]
+
+This patch fixes this warning by pinning down the PFN type to unsigned long.
+
+Signed-off-by: Cyril Chemparathy <cyril@ti.com>
+Acked-by: Nicolas Pitre <nico@linaro.org>
+Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
+Tested-by: Subash Patel <subash.rp@samsung.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Cc: Hou Pengyang <houpengyang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/memory.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/memory.h
++++ b/arch/arm/include/asm/memory.h
+@@ -207,7 +207,7 @@ static inline unsigned long __phys_to_vi
+  * direct-mapped view.  We assume this is the first page
+  * of RAM in the mem_map as well.
+  */
+-#define PHYS_PFN_OFFSET       (PHYS_OFFSET >> PAGE_SHIFT)
++#define PHYS_PFN_OFFSET       ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+ /*
+  * These are *only* valid on the kernel direct mapped RAM memory.
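
The warning itself is straightforward to reproduce. A hedged user-space sketch (all names are stand-ins; on LPAE phys_addr_t is 64-bit):

#include <stdio.h>

typedef unsigned long long phys_addr_t_demo;    /* 64-bit on LPAE */
#define PHYS_OFFSET_DEMO ((phys_addr_t_demo)0x80000000ULL)
#define PAGE_SHIFT_DEMO  12

/* old definition: inherits the 64-bit type, so "%lx" callers warn */
#define PHYS_PFN_OFFSET_OLD (PHYS_OFFSET_DEMO >> PAGE_SHIFT_DEMO)
/* fixed definition: pinned to unsigned long, as in the patch */
#define PHYS_PFN_OFFSET_NEW ((unsigned long)(PHYS_OFFSET_DEMO >> PAGE_SHIFT_DEMO))

int main(void)
{
        /* printf("%lx\n", PHYS_PFN_OFFSET_OLD);  -Wformat warning on 32-bit */
        printf("%lx\n", PHYS_PFN_OFFSET_NEW);    /* clean */
        return 0;
}
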
diff --git a/queue-3.10/arm-lpae-fix-definition-of-pte_hwtable_ptrs.patch b/queue-3.10/arm-lpae-fix-definition-of-pte_hwtable_ptrs.patch
new file mode 100644
index 0000000..8ed9827
--- /dev/null
@@ -0,0 +1,39 @@
+From e38a517578d6c0f764b0d0f6e26dcdf9f70c69d7 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 2 May 2013 13:52:01 +0100
+Subject: ARM: lpae: fix definition of PTE_HWTABLE_PTRS
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit e38a517578d6c0f764b0d0f6e26dcdf9f70c69d7 upstream.
+
+For 2-level page tables, PTE_HWTABLE_PTRS describes the offset between
+Linux PTEs and hardware PTEs. On LPAE, there is no distinction (since
+we have 64-bit descriptors with plenty of space) so PTE_HWTABLE_PTRS
+should be 0. Unfortunately, it is wrongly defined as PTRS_PER_PTE,
+meaning that current pte table flushing is off by a page. Luckily,
+all current LPAE implementations are SMP, so the hardware walker can
+snoop L1.
+
+This patch fixes the broken definition.
+
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Cc: Hou Pengyang <houpengyang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/pgtable-3level.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -33,7 +33,7 @@
+ #define PTRS_PER_PMD          512
+ #define PTRS_PER_PGD          4
+-#define PTE_HWTABLE_PTRS      (PTRS_PER_PTE)
++#define PTE_HWTABLE_PTRS      (0)
+ #define PTE_HWTABLE_OFF               (0)
+ #define PTE_HWTABLE_SIZE      (PTRS_PER_PTE * sizeof(u64))
diff --git a/queue-3.10/arm-lpae-use-phys_addr_t-in-alloc_init_pud.patch b/queue-3.10/arm-lpae-use-phys_addr_t-in-alloc_init_pud.patch
new file mode 100644
index 0000000..fb83612
--- /dev/null
@@ -0,0 +1,42 @@
+From 20d6956d8cd2452cec0889ff040f18afc03c2e6b Mon Sep 17 00:00:00 2001
+From: Vitaly Andrianov <vitalya@ti.com>
+Date: Tue, 10 Jul 2012 14:41:17 -0400
+Subject: ARM: LPAE: use phys_addr_t in alloc_init_pud()
+
+From: Vitaly Andrianov <vitalya@ti.com>
+
+commit 20d6956d8cd2452cec0889ff040f18afc03c2e6b upstream.
+
+This patch fixes the alloc_init_pud() function to use phys_addr_t instead of
+unsigned long when passing in the phys argument.
+
+This is an extension to commit 97092e0c56830457af0639f6bd904537a150ea4a (ARM:
+pgtable: use phys_addr_t for physical addresses), which applied similar changes
+elsewhere in the ARM memory management code.
+
+Signed-off-by: Vitaly Andrianov <vitalya@ti.com>
+Signed-off-by: Cyril Chemparathy <cyril@ti.com>
+Acked-by: Nicolas Pitre <nico@linaro.org>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
+Tested-by: Subash Patel <subash.rp@samsung.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Cc: Hou Pengyang <houpengyang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mm/mmu.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -685,7 +685,8 @@ static void __init alloc_init_pmd(pud_t
+ }
+ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
+-      unsigned long end, unsigned long phys, const struct mem_type *type)
++                                unsigned long end, phys_addr_t phys,
++                                const struct mem_type *type)
+ {
+       pud_t *pud = pud_offset(pgd, addr);
+       unsigned long next;
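
What goes wrong without the fix: on 32-bit LPAE, unsigned long is 32 bits while physical addresses can exceed 4GB, so the high bits are silently dropped at the call boundary. A hedged sketch modeling the truncation with fixed-width types (names illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void map_section_broken(uint32_t phys)   /* stand-in for unsigned long */
{
        printf("mapping phys 0x%08" PRIx32 "\n", phys);
}

static void map_section_fixed(uint64_t phys)    /* stand-in for phys_addr_t */
{
        printf("mapping phys 0x%09" PRIx64 "\n", phys);
}

int main(void)
{
        uint64_t phys = 0x100000000ULL;     /* 4GB: reachable with LPAE */
        map_section_broken((uint32_t)phys); /* prints 0x00000000: address lost */
        map_section_fixed(phys);            /* prints 0x100000000 */
        return 0;
}
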
diff --git a/queue-3.10/arm-lpae-use-signed-arithmetic-for-mask-definitions.patch b/queue-3.10/arm-lpae-use-signed-arithmetic-for-mask-definitions.patch
new file mode 100644
index 0000000..f2e9530
--- /dev/null
@@ -0,0 +1,64 @@
+From 926edcc747e2efb3c9add7ed4dbc4e7a3a959d02 Mon Sep 17 00:00:00 2001
+From: Cyril Chemparathy <cyril@ti.com>
+Date: Sun, 22 Jul 2012 13:40:38 -0400
+Subject: ARM: LPAE: use signed arithmetic for mask definitions
+
+From: Cyril Chemparathy <cyril@ti.com>
+
+commit 926edcc747e2efb3c9add7ed4dbc4e7a3a959d02 upstream.
+
+This patch applies to PAGE_MASK, PMD_MASK, and PGDIR_MASK, where forcing
+unsigned long math truncates the mask at 32 bits.  This clearly does bad
+things on PAE systems.
+
+This patch fixes this problem by defining these masks as signed quantities.
+We then rely on sign extension to do the right thing.
+
+Signed-off-by: Cyril Chemparathy <cyril@ti.com>
+Signed-off-by: Vitaly Andrianov <vitalya@ti.com>
+Reviewed-by: Nicolas Pitre <nico@linaro.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
+Tested-by: Subash Patel <subash.rp@samsung.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Cc: Hou Pengyang <houpengyang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/page.h           |    2 +-
+ arch/arm/include/asm/pgtable-3level.h |    6 +++---
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/include/asm/page.h
++++ b/arch/arm/include/asm/page.h
+@@ -13,7 +13,7 @@
+ /* PAGE_SHIFT determines the page size */
+ #define PAGE_SHIFT            12
+ #define PAGE_SIZE             (_AC(1,UL) << PAGE_SHIFT)
+-#define PAGE_MASK             (~(PAGE_SIZE-1))
++#define PAGE_MASK             (~((1 << PAGE_SHIFT) - 1))
+ #ifndef __ASSEMBLY__
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -48,16 +48,16 @@
+ #define PMD_SHIFT             21
+ #define PMD_SIZE              (1UL << PMD_SHIFT)
+-#define PMD_MASK              (~(PMD_SIZE-1))
++#define PMD_MASK              (~((1 << PMD_SHIFT) - 1))
+ #define PGDIR_SIZE            (1UL << PGDIR_SHIFT)
+-#define PGDIR_MASK            (~(PGDIR_SIZE-1))
++#define PGDIR_MASK            (~((1 << PGDIR_SHIFT) - 1))
+ /*
+  * section address mask and size definitions.
+  */
+ #define SECTION_SHIFT         21
+ #define SECTION_SIZE          (1UL << SECTION_SHIFT)
+-#define SECTION_MASK          (~(SECTION_SIZE-1))
++#define SECTION_MASK          (~((1 << SECTION_SHIFT) - 1))
+ #define USER_PTRS_PER_PGD     (PAGE_OFFSET / PGDIR_SIZE)
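
The truncation can be modeled directly. A hedged sketch using uint32_t to stand in for a 32-bit unsigned long (constants illustrative): the old unsigned mask zero-extends and chops a 64-bit address to 32 bits, while the new signed form sign-extends and keeps the upper bits intact.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_DEMO 12
#define PAGE_SIZE_OLD   ((uint32_t)1 << PAGE_SHIFT_DEMO)   /* "unsigned long" */
#define PAGE_MASK_OLD   (~(PAGE_SIZE_OLD - 1))             /* 0xFFFFF000, 32-bit */
#define PAGE_MASK_NEW   (~((1 << PAGE_SHIFT_DEMO) - 1))    /* signed int -4096 */

int main(void)
{
        uint64_t phys = 0x880001234ULL;     /* >4GB physical address on LPAE */
        printf("old: 0x%" PRIx64 "\n", phys & PAGE_MASK_OLD); /* 0x80001000: truncated */
        printf("new: 0x%" PRIx64 "\n", phys & PAGE_MASK_NEW); /* 0x880001000: correct */
        return 0;
}
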
diff --git a/queue-3.10/arm-mm-correct-pte_same-behaviour-for-lpae.patch b/queue-3.10/arm-mm-correct-pte_same-behaviour-for-lpae.patch
new file mode 100644
index 0000000..6949ce1
--- /dev/null
@@ -0,0 +1,54 @@
+From dde1b65110353517816bcbc58539463396202244 Mon Sep 17 00:00:00 2001
+From: Steve Capper <steve.capper@linaro.org>
+Date: Fri, 17 May 2013 12:32:55 +0100
+Subject: ARM: mm: correct pte_same behaviour for LPAE.
+
+From: Steve Capper <steve.capper@linaro.org>
+
+commit dde1b65110353517816bcbc58539463396202244 upstream.
+
+For 3 levels of paging the PTE_EXT_NG bit will be set for user
+address ptes that are written to a page table but not for ptes
+created with mk_pte.
+
+This can cause some comparison tests made by pte_same to fail
+spuriously and lead to other problems.
+
+To correct this behaviour, we mask off PTE_EXT_NG for any pte that
+is present before running the comparison.
+
+Signed-off-by: Steve Capper <steve.capper@linaro.org>
+Reviewed-by: Will Deacon <will.deacon@arm.com>
+Cc: Hou Pengyang <houpengyang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/pgtable-3level.h |   17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -166,6 +166,23 @@ static inline pmd_t *pmd_offset(pud_t *p
+               clean_pmd_entry(pmdp);  \
+       } while (0)
++/*
++ * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
++ * that are written to a page table but not for ptes created with mk_pte.
++ *
++ * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
++ * hugetlb_cow, where it is compared with an entry in a page table.
++ * This comparison test fails erroneously leading ultimately to a memory leak.
++ *
++ * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
++ * present before running the comparison.
++ */
++#define __HAVE_ARCH_PTE_SAME
++#define pte_same(pte_a,pte_b) ((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG     \
++                                      : pte_val(pte_a))                               \
++                              == (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG   \
++                                      : pte_val(pte_b)))
++
+ #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
+ #endif /* __ASSEMBLY__ */
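
The spurious comparison is easy to model: two ptes for the same page that differ only in the nG bit. A hedged user-space sketch (the bit position is taken from pgtable-3level-hwdef.h above; all other names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define L_PTE_PRESENT_DEMO (3ULL << 0)
#define PTE_EXT_NG_DEMO    (1ULL << 11)     /* nG, set only when written out */

static uint64_t masked(uint64_t pte)
{
        /* mask nG only for present ptes, mirroring the macro above */
        return (pte & L_PTE_PRESENT_DEMO) ? pte & ~PTE_EXT_NG_DEMO : pte;
}

int main(void)
{
        uint64_t in_table    = 0x12345000ULL | L_PTE_PRESENT_DEMO | PTE_EXT_NG_DEMO;
        uint64_t from_mk_pte = 0x12345000ULL | L_PTE_PRESENT_DEMO;  /* no nG */

        printf("raw:    %s\n", in_table == from_mk_pte ? "same" : "different");
        printf("masked: %s\n", masked(in_table) == masked(from_mk_pte)
                                   ? "same" : "different");
        return 0;
}
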
diff --git a/queue-3.10/series b/queue-3.10/series
index 5421554ccd8a7b0894e215a897eeb5876cdeeb1e..ef1e1d017fdd060c6b59239a6a6f1f4d41f9591f 100644
@@ -15,3 +15,16 @@ dm-cache-fix-missing-err_ptr-returns-and-handling.patch
 spi-pxa2xx-clear-cur_chip-pointer-before-starting-next-message.patch
 regulator-core-fix-race-condition-in-regulator_put.patch
 drivers-net-cpsw-discard-dual-emac-default-vlan-configuration.patch
+arm-7829-1-add-.text.unlikely-and-.text.hot-to-arm-unwind-tables.patch
+arm-mm-correct-pte_same-behaviour-for-lpae.patch
+arm-lpae-use-signed-arithmetic-for-mask-definitions.patch
+arm-lpae-use-phys_addr_t-in-alloc_init_pud.patch
+arm-fix-type-of-phys_pfn_offset-to-unsigned-long.patch
+arm-lpae-fix-definition-of-pte_hwtable_ptrs.patch
+arm-7866-1-include-asm-use-long-long-instead-of-u64-within-atomic.h.patch
+arm-7867-1-include-asm-use-int-instead-of-unsigned-long-for-oldval-in-atomic_cmpxchg.patch
+arm-fix-asm-memory.h-build-error.patch
+arm-7931-1-correct-virt_addr_valid.patch
+arm-dma-ensure-that-old-section-mappings-are-flushed-from-the-tlb.patch
+arm-8108-1-mm-introduce-pte-pmd-_isset-and.patch
+arm-8109-1-mm-modify-pte_write-and-pmd_write-logic-for-lpae.patch