--- /dev/null
+From 69e4e63ec816a7e22cc3aa14bc7ef4ac734d370c Mon Sep 17 00:00:00 2001
+From: Manuel Lauss <manuel.lauss@gmail.com>
+Date: Wed, 18 Feb 2015 11:01:56 +0100
+Subject: MIPS: Alchemy: Fix cpu clock calculation
+
+From: Manuel Lauss <manuel.lauss@gmail.com>
+
+commit 69e4e63ec816a7e22cc3aa14bc7ef4ac734d370c upstream.
+
+The current code uses bits 0-6 of the sys_cpupll register to calculate
+core clock speed. However this is only valid on Au1300, on all earlier
+models the hardware only uses bits 0-5 to generate core clock.
+
+This fixes clock calculation on the MTX1 (Au1500), where bit 6 of cpupll
+is set as well, which ultimately led the code to calculate a bogus cpu
+core clock and also uart base clock down the line.
+
+Signed-off-by: Manuel Lauss <manuel.lauss@gmail.com>
+Reported-by: John Crispin <blogic@openwrt.org>
+Tested-by: Bruno Randolf <br1@einfach.org>
+Cc: Linux-MIPS <linux-mips@linux-mips.org>
+Patchwork: https://patchwork.linux-mips.org/patch/9279/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/alchemy/common/clock.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/mips/alchemy/common/clock.c
++++ b/arch/mips/alchemy/common/clock.c
+@@ -127,6 +127,8 @@ static unsigned long alchemy_clk_cpu_rec
+ t = 396000000;
+ else {
+ t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
++ if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
++ t &= 0x3f;
+ t *= parent_rate;
+ }
+
--- /dev/null
+From 98a833c1fa4de0695830f77b2d13fd86693da298 Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Wed, 5 Nov 2014 14:17:52 +0000
+Subject: MIPS: asm: asmmacro: Replace "add" instructions with "addu"
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit 98a833c1fa4de0695830f77b2d13fd86693da298 upstream.
+
+The "add" instruction is actually a macro in binutils and depending on
+the size of the immediate it can expand to an "addi" instruction.
+However, the "addi" instruction traps on overflows which is not
+something we want on address calculation.
+
+Link: http://www.linux-mips.org/archives/linux-mips/2015-01/msg00121.html
+Cc: Paul Burton <paul.burton@imgtec.com>
+Cc: Maciej W. Rozycki <macro@linux-mips.org>
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/asmmacro.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/include/asm/asmmacro.h
++++ b/arch/mips/include/asm/asmmacro.h
+@@ -304,7 +304,7 @@
+ .set push
+ .set noat
+ SET_HARDFLOAT
+- add $1, \base, \off
++ addu $1, \base, \off
+ .word LDD_MSA_INSN | (\wd << 6)
+ .set pop
+ .endm
+@@ -313,7 +313,7 @@
+ .set push
+ .set noat
+ SET_HARDFLOAT
+- add $1, \base, \off
++ addu $1, \base, \off
+ .word STD_MSA_INSN | (\wd << 6)
+ .set pop
+ .endm
--- /dev/null
+From 461d1597ffad7a826f8aaa63ab0727c37b632e34 Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Mon, 26 Jan 2015 09:40:34 +0000
+Subject: MIPS: asm: pgtable: Add c0 hazards on HTW start/stop sequences
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit 461d1597ffad7a826f8aaa63ab0727c37b632e34 upstream.
+
+When we use htw_{start,stop}() outside of htw_reset(), we need
+to ensure that c0 changes have been propagated properly before
+we attempt to continue with subsequent memory operations.
+
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/9114/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/pgtable.h | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -99,16 +99,20 @@ extern void paging_init(void);
+
+ #define htw_stop() \
+ do { \
+- if (cpu_has_htw) \
++ if (cpu_has_htw) { \
+ write_c0_pwctl(read_c0_pwctl() & \
+ ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \
++ back_to_back_c0_hazard(); \
++ } \
+ } while(0)
+
+ #define htw_start() \
+ do { \
+- if (cpu_has_htw) \
++ if (cpu_has_htw) { \
+ write_c0_pwctl(read_c0_pwctl() | \
+ (1 << MIPS_PWCTL_PWEN_SHIFT)); \
++ back_to_back_c0_hazard(); \
++ } \
+ } while(0)
+
+
+@@ -116,9 +120,7 @@ do { \
+ do { \
+ if (cpu_has_htw) { \
+ htw_stop(); \
+- back_to_back_c0_hazard(); \
+ htw_start(); \
+- back_to_back_c0_hazard(); \
+ } \
+ } while(0)
+
--- /dev/null
+From fde3538a8a711aedf1173ecb2d45aed868f51c97 Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Mon, 26 Jan 2015 09:40:36 +0000
+Subject: MIPS: asm: pgtable: Prevent HTW race when updating PTEs
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit fde3538a8a711aedf1173ecb2d45aed868f51c97 upstream.
+
+Whenever we modify a page table entry, we need to ensure that the HTW
+will not fetch a stale entry. And for that to happen we need to ensure
+that HTW is stopped before we modify the said entry otherwise the HTW
+may already be in the process of reading that entry and fetching the
+old information. As a result of which, we replace the htw_reset() calls
+with htw_{stop,start} in more appropriate places. This also removes the
+remaining users of htw_reset() and as a result we drop that macro.
+
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/9116/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/pgtable.h | 14 ++++----------
+ 1 file changed, 4 insertions(+), 10 deletions(-)
+
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -116,14 +116,6 @@ do { \
+ } while(0)
+
+
+-#define htw_reset() \
+-do { \
+- if (cpu_has_htw) { \
+- htw_stop(); \
+- htw_start(); \
+- } \
+-} while(0)
+-
+ extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+ pte_t pteval);
+
+@@ -155,12 +147,13 @@ static inline void pte_clear(struct mm_s
+ {
+ pte_t null = __pte(0);
+
++ htw_stop();
+ /* Preserve global status for the pair */
+ if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
+ null.pte_low = null.pte_high = _PAGE_GLOBAL;
+
+ set_pte_at(mm, addr, ptep, null);
+- htw_reset();
++ htw_start();
+ }
+ #else
+
+@@ -190,6 +183,7 @@ static inline void set_pte(pte_t *ptep,
+
+ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
++ htw_stop();
+ #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
+ /* Preserve global status for the pair */
+ if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
+@@ -197,7 +191,7 @@ static inline void pte_clear(struct mm_s
+ else
+ #endif
+ set_pte_at(mm, addr, ptep, __pte(0));
+- htw_reset();
++ htw_start();
+ }
+ #endif
+
--- /dev/null
+From 3ce465e04bfd8de9956d515d6e9587faac3375dc Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Tue, 10 Feb 2015 10:02:59 +0000
+Subject: MIPS: Export FP functions used by lose_fpu(1) for KVM
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 3ce465e04bfd8de9956d515d6e9587faac3375dc upstream.
+
+Export the _save_fp asm function used by the lose_fpu(1) macro to GPL
+modules so that KVM can make use of it when it is built as a module.
+
+This fixes the following build error when CONFIG_KVM=m due to commit
+f798217dfd03 ("KVM: MIPS: Don't leak FPU/DSP to guest"):
+
+ERROR: "_save_fp" [arch/mips/kvm/kvm.ko] undefined!
+
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Fixes: f798217dfd03 (KVM: MIPS: Don't leak FPU/DSP to guest)
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: Paul Burton <paul.burton@imgtec.com>
+Cc: Gleb Natapov <gleb@kernel.org>
+Cc: kvm@vger.kernel.org
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/9260/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/mips_ksyms.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/mips/kernel/mips_ksyms.c
++++ b/arch/mips/kernel/mips_ksyms.c
+@@ -14,6 +14,7 @@
+ #include <linux/mm.h>
+ #include <asm/uaccess.h>
+ #include <asm/ftrace.h>
++#include <asm/fpu.h>
+
+ extern void *__bzero(void *__s, size_t __count);
+ extern long __strncpy_from_kernel_nocheck_asm(char *__to,
+@@ -32,6 +33,11 @@ extern long __strnlen_user_nocheck_asm(c
+ extern long __strnlen_user_asm(const char *s);
+
+ /*
++ * Core architecture code
++ */
++EXPORT_SYMBOL_GPL(_save_fp);
++
++/*
+ * String functions
+ */
+ EXPORT_SYMBOL(memset);
--- /dev/null
+From ca5d25642e212f73492d332d95dc90ef46a0e8dc Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Tue, 10 Feb 2015 10:03:00 +0000
+Subject: MIPS: Export MSA functions used by lose_fpu(1) for KVM
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit ca5d25642e212f73492d332d95dc90ef46a0e8dc upstream.
+
+Export the _save_msa asm function used by the lose_fpu(1) macro to GPL
+modules so that KVM can make use of it when it is built as a module.
+
+This fixes the following build error when CONFIG_KVM=m and
+CONFIG_CPU_HAS_MSA=y due to commit f798217dfd03 ("KVM: MIPS: Don't leak
+FPU/DSP to guest"):
+
+ERROR: "_save_msa" [arch/mips/kvm/kvm.ko] undefined!
+
+Fixes: f798217dfd03 (KVM: MIPS: Don't leak FPU/DSP to guest)
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: Paul Burton <paul.burton@imgtec.com>
+Cc: Gleb Natapov <gleb@kernel.org>
+Cc: kvm@vger.kernel.org
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/9261/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/mips_ksyms.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/mips/kernel/mips_ksyms.c
++++ b/arch/mips/kernel/mips_ksyms.c
+@@ -15,6 +15,7 @@
+ #include <asm/uaccess.h>
+ #include <asm/ftrace.h>
+ #include <asm/fpu.h>
++#include <asm/msa.h>
+
+ extern void *__bzero(void *__s, size_t __count);
+ extern long __strncpy_from_kernel_nocheck_asm(char *__to,
+@@ -36,6 +37,9 @@ extern long __strnlen_user_asm(const cha
+ * Core architecture code
+ */
+ EXPORT_SYMBOL_GPL(_save_fp);
++#ifdef CONFIG_CPU_HAS_MSA
++EXPORT_SYMBOL_GPL(_save_msa);
++#endif
+
+ /*
+ * String functions
--- /dev/null
+From acac4108df6029c03195513ead7073bbb0cb9718 Mon Sep 17 00:00:00 2001
+From: Markos Chandras <markos.chandras@imgtec.com>
+Date: Mon, 24 Nov 2014 14:40:11 +0000
+Subject: MIPS: kernel: cps-vec: Replace "addi" with "addiu"
+
+From: Markos Chandras <markos.chandras@imgtec.com>
+
+commit acac4108df6029c03195513ead7073bbb0cb9718 upstream.
+
+The "addi" instruction will trap on overflows which is not something
+we need in this code, so we replace that with "addiu".
+
+Link: http://www.linux-mips.org/archives/linux-mips/2015-01/msg00430.html
+Cc: Maciej W. Rozycki <macro@linux-mips.org>
+Cc: Paul Burton <paul.burton@imgtec.com>
+Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/cps-vec.S | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/arch/mips/kernel/cps-vec.S
++++ b/arch/mips/kernel/cps-vec.S
+@@ -99,11 +99,11 @@ not_nmi:
+ xori t2, t1, 0x7
+ beqz t2, 1f
+ li t3, 32
+- addi t1, t1, 1
++ addiu t1, t1, 1
+ sllv t1, t3, t1
+ 1: /* At this point t1 == I-cache sets per way */
+ _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
+- addi t2, t2, 1
++ addiu t2, t2, 1
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+@@ -126,11 +126,11 @@ icache_done:
+ xori t2, t1, 0x7
+ beqz t2, 1f
+ li t3, 32
+- addi t1, t1, 1
++ addiu t1, t1, 1
+ sllv t1, t3, t1
+ 1: /* At this point t1 == D-cache sets per way */
+ _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
+- addi t2, t2, 1
++ addiu t2, t2, 1
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+@@ -250,7 +250,7 @@ LEAF(mips_cps_core_init)
+ mfc0 t0, CP0_MVPCONF0
+ srl t0, t0, MVPCONF0_PVPE_SHIFT
+ andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
+- addi t7, t0, 1
++ addiu t7, t0, 1
+
+ /* If there's only 1, we're done */
+ beqz t0, 2f
+@@ -280,7 +280,7 @@ LEAF(mips_cps_core_init)
+ mttc0 t0, CP0_TCHALT
+
+ /* Next VPE */
+- addi t5, t5, 1
++ addiu t5, t5, 1
+ slt t0, t5, t7
+ bnez t0, 1b
+ nop
+@@ -317,7 +317,7 @@ LEAF(mips_cps_boot_vpes)
+ mfc0 t1, CP0_MVPCONF0
+ srl t1, t1, MVPCONF0_PVPE_SHIFT
+ andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
+- addi t1, t1, 1
++ addiu t1, t1, 1
+
+ /* Calculate a mask for the VPE ID from EBase.CPUNum */
+ clz t1, t1
+@@ -424,7 +424,7 @@ LEAF(mips_cps_boot_vpes)
+
+ /* Next VPE */
+ 2: srl t6, t6, 1
+- addi t5, t5, 1
++ addiu t5, t5, 1
+ bnez t6, 1b
+ nop
+
--- /dev/null
+From cbef8478bee55775ac312a574aad48af7bb9cf9f Mon Sep 17 00:00:00 2001
+From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Date: Wed, 11 Feb 2015 15:25:19 -0800
+Subject: mm/hugetlb: pmd_huge() returns true for non-present hugepage
+
+From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+
+commit cbef8478bee55775ac312a574aad48af7bb9cf9f upstream.
+
+Migrating hugepages and hwpoisoned hugepages are considered as non-present
+hugepages, and they are referenced via migration entries and hwpoison
+entries in their page table slots.
+
+This behavior causes race condition because pmd_huge() doesn't tell
+non-huge pages from migrating/hwpoisoned hugepages. follow_page_mask() is
+one example where the kernel would call follow_page_pte() for such
+hugepage while this function is supposed to handle only normal pages.
+
+To avoid this, this patch makes pmd_huge() return true when pmd_none() is
+true *and* pmd_present() is false. We don't have to worry about mixing up
+non-present pmd entry with normal pmd (pointing to leaf level pte entry)
+because pmd_present() is true in normal pmd.
+
+The same race condition could happen in (x86-specific) gup_pmd_range(),
+where this patch simply adds pmd_present() check instead of pmd_huge().
+This is because gup_pmd_range() is fast path. If we have non-present
+hugepage in this function, we will go into gup_huge_pmd(), then return 0
+at flag mask check, and finally fall back to the slow path.
+
+Fixes: 290408d4a2 ("hugetlb: hugepage migration core")
+Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: James Hogan <james.hogan@imgtec.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Mel Gorman <mel@csn.ul.ie>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Luiz Capitulino <lcapitulino@redhat.com>
+Cc: Nishanth Aravamudan <nacc@linux.vnet.ibm.com>
+Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
+Cc: Steve Capper <steve.capper@linaro.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/gup.c | 2 +-
+ arch/x86/mm/hugetlbpage.c | 8 +++++++-
+ mm/hugetlb.c | 2 ++
+ 3 files changed, 10 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -172,7 +172,7 @@ static int gup_pmd_range(pud_t pud, unsi
+ */
+ if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+ return 0;
+- if (unlikely(pmd_large(pmd))) {
++ if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) {
+ /*
+ * NUMA hinting faults need to be handled in the GUP
+ * slowpath for accounting purposes and so that they
+--- a/arch/x86/mm/hugetlbpage.c
++++ b/arch/x86/mm/hugetlbpage.c
+@@ -66,9 +66,15 @@ follow_huge_addr(struct mm_struct *mm, u
+ return ERR_PTR(-EINVAL);
+ }
+
++/*
++ * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal
++ * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
++ * Otherwise, returns 0.
++ */
+ int pmd_huge(pmd_t pmd)
+ {
+- return !!(pmd_val(pmd) & _PAGE_PSE);
++ return !pmd_none(pmd) &&
++ (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
+ }
+
+ int pud_huge(pud_t pud)
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3666,6 +3666,8 @@ follow_huge_pmd(struct mm_struct *mm, un
+ {
+ struct page *page;
+
++ if (!pmd_present(*pmd))
++ return NULL;
+ page = pte_page(*(pte_t *)pmd);
+ if (page)
+ page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
nfs-struct-nfs_commit_info.lock-must-always-point-to-inode-i_lock.patch
kvm-mips-disable-htw-while-in-guest.patch
kvm-mips-don-t-leak-fpu-dsp-to-guest.patch
+mips-alchemy-fix-cpu-clock-calculation.patch
+mips-kernel-cps-vec-replace-addi-with-addiu.patch
+mips-asm-asmmacro-replace-add-instructions-with-addu.patch
+mips-asm-pgtable-add-c0-hazards-on-htw-start-stop-sequences.patch
+mips-asm-pgtable-prevent-htw-race-when-updating-ptes.patch
+mips-export-fp-functions-used-by-lose_fpu-1-for-kvm.patch
+mips-export-msa-functions-used-by-lose_fpu-1-for-kvm.patch
+mm-hugetlb-pmd_huge-returns-true-for-non-present-hugepage.patch