--- /dev/null
+From 624531886987f0f1b5d01fb598034d039198e090 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 7 Jun 2016 17:57:54 +0100
+Subject: ARM: 8578/1: mm: ensure pmd_present only checks the valid bit
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 624531886987f0f1b5d01fb598034d039198e090 upstream.
+
+In a subsequent patch, pmd_mknotpresent will clear the valid bit of the
+pmd entry, resulting in a not-present entry from the hardware's
+perspective. Unfortunately, pmd_present simply checks for a non-zero pmd
+value and will therefore continue to return true even after a
+pmd_mknotpresent operation. Since pmd_mknotpresent is only used for
+managing huge entries, this is only an issue for the 3-level case.
+
+This patch fixes the 3-level pmd_present implementation to take into
+account the valid bit. For bisectability, the change is made before the
+fix to pmd_mknotpresent.
+
+[catalin.marinas@arm.com: comment update regarding pmd_mknotpresent patch]
+
+Fixes: 8d9625070073 ("ARM: mm: Transparent huge page support for LPAE systems.")
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: Steve Capper <Steve.Capper@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
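+As a rough illustration of why the raw-value check is wrong once the
+valid bit can be cleared, here is a minimal userspace sketch; plain
+integers stand in for pmd entries, and FAKE_PMD_VALID is an assumed
+stand-in for L_PMD_SECT_VALID, not the real bit position:
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  #define FAKE_PMD_VALID (1ULL << 0)  /* stand-in for L_PMD_SECT_VALID */
+
+  /* old behaviour: any non-zero entry counts as present */
+  static int pmd_present_old(uint64_t pmd) { return pmd != 0; }
+
+  /* fixed behaviour: only the valid bit counts */
+  static int pmd_present_new(uint64_t pmd) { return (pmd & FAKE_PMD_VALID) != 0; }
+
+  int main(void)
+  {
+          uint64_t pmd = 0x1234 | FAKE_PMD_VALID; /* a present huge entry */
+
+          pmd &= ~FAKE_PMD_VALID;  /* pmd_mknotpresent-style invalidation */
+
+          /* old still reports present (1); new correctly reports 0 */
+          printf("old=%d new=%d\n", pmd_present_old(pmd), pmd_present_new(pmd));
+          return 0;
+  }
+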
+ arch/arm/include/asm/pgtable-2level.h | 1 +
+ arch/arm/include/asm/pgtable-3level.h | 1 +
+ arch/arm/include/asm/pgtable.h | 1 -
+ 3 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *p
+
+ #define pmd_large(pmd) (pmd_val(pmd) & 2)
+ #define pmd_bad(pmd) (pmd_val(pmd) & 2)
++#define pmd_present(pmd) (pmd_val(pmd))
+
+ #define copy_pmd(pmdpd,pmdps) \
+ do { \
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -212,6 +212,7 @@ static inline pmd_t *pmd_offset(pud_t *p
+ : !!(pmd_val(pmd) & (val)))
+ #define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
+
++#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID))
+ #define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
+ #define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL))
+ static inline pte_t pte_mkspecial(pte_t pte)
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD
+ #define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
+
+ #define pmd_none(pmd) (!pmd_val(pmd))
+-#define pmd_present(pmd) (pmd_val(pmd))
+
+ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+ {
--- /dev/null
+From 56530f5d2ddc9b9fade7ef8db9cb886e9dc689b5 Mon Sep 17 00:00:00 2001
+From: Steve Capper <steve.capper@arm.com>
+Date: Tue, 7 Jun 2016 17:58:06 +0100
+Subject: ARM: 8579/1: mm: Fix definition of pmd_mknotpresent
+
+From: Steve Capper <steve.capper@arm.com>
+
+commit 56530f5d2ddc9b9fade7ef8db9cb886e9dc689b5 upstream.
+
+Currently pmd_mknotpresent will use a zero entry to represent an
+invalidated pmd.
+
+Unfortunately this definition clashes with pmd_none: a race condition
+can occur if zap_pmd_range sees pmd_none while __split_huge_pmd_locked
+is running concurrently and has just called pmdp_invalidate.
+
+This patch fixes the race condition by modifying pmd_mknotpresent to
+create non-zero faulting entries (as is done in other architectures),
+removing the ambiguity with pmd_none.
+
+[catalin.marinas@arm.com: using L_PMD_SECT_VALID instead of PMD_TYPE_SECT]
+
+Fixes: 8d9625070073 ("ARM: mm: Transparent huge page support for LPAE systems.")
+Reported-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Cc: Russell King <linux@armlinux.org.uk>
+Signed-off-by: Steve Capper <steve.capper@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
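+As a rough sketch of the pmd_none ambiguity this patch removes (a
+userspace model; FAKE_PMD_VALID is an assumed stand-in for
+L_PMD_SECT_VALID):
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  #define FAKE_PMD_VALID (1ULL << 0)
+
+  static int pmd_none(uint64_t pmd)    { return pmd == 0; }
+  static int pmd_present(uint64_t pmd) { return (pmd & FAKE_PMD_VALID) != 0; }
+
+  static uint64_t mknotpresent_old(uint64_t pmd) { (void)pmd; return 0; }
+  static uint64_t mknotpresent_new(uint64_t pmd) { return pmd & ~FAKE_PMD_VALID; }
+
+  int main(void)
+  {
+          uint64_t huge = 0x200000ULL | FAKE_PMD_VALID; /* made-up huge entry */
+
+          /* old: an invalidated pmd looks empty, so zap_pmd_range skips it */
+          printf("old: none=%d\n", pmd_none(mknotpresent_old(huge)));
+
+          /* new: invalidated but non-zero - not none, not present */
+          uint64_t inv = mknotpresent_new(huge);
+          printf("new: none=%d present=%d\n", pmd_none(inv), pmd_present(inv));
+          return 0;
+  }
+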
+ arch/arm/include/asm/pgtable-3level.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -258,10 +258,10 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
+ #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+ #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+
+-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
++/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
+ static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+ {
+- return __pmd(0);
++ return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
+ }
+
+ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
--- /dev/null
+From 20c15226d1c73150c4d9107301cac5dda0b7f995 Mon Sep 17 00:00:00 2001
+From: Fabio Estevam <fabio.estevam@nxp.com>
+Date: Wed, 11 May 2016 16:39:30 -0300
+Subject: ARM: imx6ul: Fix Micrel PHY mask
+
+From: Fabio Estevam <fabio.estevam@nxp.com>
+
+commit 20c15226d1c73150c4d9107301cac5dda0b7f995 upstream.
+
+The value used for the Micrel PHY mask is not correct. Use the
+MICREL_PHY_ID_MASK definition instead.
+
+Thanks to Jiri Luznicky for proposing the fix at
+https://community.freescale.com/thread/387739
+
+Fixes: 709bc0657fe6f9f55 ("ARM: imx6ul: add fec MAC refrence clock and phy fixup init")
+Signed-off-by: Fabio Estevam <fabio.estevam@nxp.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
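+The point of the mask: phylib applies a fixup when the registered
+(id, mask) pair matches the probed PHY ID, roughly as in the sketch
+below. With a 0xffffffff mask the low revision nibble must match
+exactly, so other silicon revisions of the KSZ8081 would miss the
+fixup. The two constants are taken from include/linux/micrel_phy.h;
+the sketch itself is only a userspace model of the comparison:
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  #define PHY_ID_KSZ8081     0x00221560
+  #define MICREL_PHY_ID_MASK 0x00fffff0  /* low nibble = revision */
+
+  static int fixup_matches(uint32_t phy_id, uint32_t wanted, uint32_t mask)
+  {
+          return (phy_id & mask) == (wanted & mask);
+  }
+
+  int main(void)
+  {
+          uint32_t rev1 = PHY_ID_KSZ8081 | 0x1; /* hypothetical new revision */
+
+          printf("0xffffffff mask: %d\n",
+                 fixup_matches(rev1, PHY_ID_KSZ8081, 0xffffffff));         /* 0 */
+          printf("MICREL mask:     %d\n",
+                 fixup_matches(rev1, PHY_ID_KSZ8081, MICREL_PHY_ID_MASK)); /* 1 */
+          return 0;
+  }
+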
+ arch/arm/mach-imx/mach-imx6ul.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/mach-imx/mach-imx6ul.c
++++ b/arch/arm/mach-imx/mach-imx6ul.c
+@@ -46,7 +46,7 @@ static int ksz8081_phy_fixup(struct phy_
+ static void __init imx6ul_enet_phy_init(void)
+ {
+ if (IS_BUILTIN(CONFIG_PHYLIB))
+- phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff,
++ phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK,
+ ksz8081_phy_fixup);
+ }
+
--- /dev/null
+From 797179bc4fe06c89e47a9f36f886f68640b423f8 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Thu, 9 Jun 2016 10:50:43 +0100
+Subject: MIPS: KVM: Fix modular KVM under QEMU
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 797179bc4fe06c89e47a9f36f886f68640b423f8 upstream.
+
+Copy __kvm_mips_vcpu_run() into unmapped memory, so that we can never
+get a TLB refill exception in it when KVM is built as a module.
+
+This was observed to happen with the host MIPS kernel running under
+QEMU, due to a not entirely transparent optimisation in the QEMU TLB
+handling where TLB entries replaced with TLBWR are copied to a separate
+part of the TLB array. Code in those pages continues to be executable,
+but those mappings persist only until the next ASID switch, even if they
+are marked global.
+
+An ASID switch happens in __kvm_mips_vcpu_run() at exception level after
+switching to the guest exception base. Subsequent TLB-mapped kernel
+instructions just prior to switching to the guest trigger a TLB refill
+exception, which enters the guest exception handlers without updating
+EPC. This appears as a guest-triggered TLB refill on a host-kernel-mapped
+(host KSeg2) address, which is not handled correctly, as user (guest)
+mode accesses to kernel (host) segments always generate address error
+exceptions.
+
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: kvm@vger.kernel.org
+Cc: linux-mips@linux-mips.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
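+The shape of the fix, reduced to a userspace sketch: the run loop is
+reached through a per-vcpu function pointer, so vcpu creation can point
+it either at the built-in function or at a copy placed in safe memory.
+The names below are stand-ins, not the kernel's:
+
+  #include <stdio.h>
+
+  typedef int (*run_fn)(int run, int vcpu);
+
+  struct vcpu_arch { run_fn vcpu_run; };
+
+  static int builtin_run(int run, int vcpu) { return run + vcpu; }
+  /* stands in for the copy memcpy'd to gebase + offset in the MODULE case */
+  static int relocated_run(int run, int vcpu) { return builtin_run(run, vcpu); }
+
+  int main(void)
+  {
+          struct vcpu_arch arch;
+
+  #ifdef MODULE               /* mirrors the patch's #ifdef MODULE */
+          arch.vcpu_run = relocated_run;
+  #else
+          arch.vcpu_run = builtin_run;
+  #endif
+          /* every entry to the guest now goes through the pointer */
+          printf("%d\n", arch.vcpu_run(1, 2));
+          return 0;
+  }
+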
+ arch/mips/include/asm/kvm_host.h | 1 +
+ arch/mips/kvm/interrupt.h | 1 +
+ arch/mips/kvm/locore.S | 1 +
+ arch/mips/kvm/mips.c | 11 ++++++++++-
+ 4 files changed, 13 insertions(+), 1 deletion(-)
+
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -372,6 +372,7 @@ struct kvm_mips_tlb {
+ #define KVM_MIPS_GUEST_TLB_SIZE 64
+ struct kvm_vcpu_arch {
+ void *host_ebase, *guest_ebase;
++ int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ unsigned long host_stack;
+ unsigned long host_gp;
+
+--- a/arch/mips/kvm/interrupt.h
++++ b/arch/mips/kvm/interrupt.h
+@@ -28,6 +28,7 @@
+ #define MIPS_EXC_MAX 12
+ /* XXXSL More to follow */
+
++extern char __kvm_mips_vcpu_run_end[];
+ extern char mips32_exception[], mips32_exceptionEnd[];
+ extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
+
+--- a/arch/mips/kvm/locore.S
++++ b/arch/mips/kvm/locore.S
+@@ -227,6 +227,7 @@ FEXPORT(__kvm_mips_load_k0k1)
+
+ /* Jump to guest */
+ eret
++EXPORT(__kvm_mips_vcpu_run_end)
+
+ VECTOR(MIPSX(exception), unknown)
+ /* Find out what mode we came from and jump to the proper handler. */
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -314,6 +314,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(st
+ memcpy(gebase + offset, mips32_GuestException,
+ mips32_GuestExceptionEnd - mips32_GuestException);
+
++#ifdef MODULE
++ offset += mips32_GuestExceptionEnd - mips32_GuestException;
++ memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
++ __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
++ vcpu->arch.vcpu_run = gebase + offset;
++#else
++ vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
++#endif
++
+ /* Invalidate the icache for these ranges */
+ local_flush_icache_range((unsigned long)gebase,
+ (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
+@@ -403,7 +412,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+ /* Disable hardware page table walking while in guest */
+ htw_stop();
+
+- r = __kvm_mips_vcpu_run(run, vcpu);
++ r = vcpu->arch.vcpu_run(run, vcpu);
+
+ /* Re-enable HTW before enabling interrupts */
+ htw_start();
nfsd-check-permissions-when-setting-acls.patch
make-nfs_atomic_open-call-d_drop-on-all-open_context-errors.patch
nfs-fix-another-open_downgrade-bug.patch
+arm-imx6ul-fix-micrel-phy-mask.patch
+arm-8578-1-mm-ensure-pmd_present-only-checks-the-valid-bit.patch
+arm-8579-1-mm-fix-definition-of-pmd_mknotpresent.patch
+mips-kvm-fix-modular-kvm-under-qemu.patch
+ubifs-implement-migratepage.patch
--- /dev/null
+From 4ac1c17b2044a1b4b2fbed74451947e905fc2992 Mon Sep 17 00:00:00 2001
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Thu, 16 Jun 2016 23:26:15 +0200
+Subject: UBIFS: Implement ->migratepage()
+
+From: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+
+commit 4ac1c17b2044a1b4b2fbed74451947e905fc2992 upstream.
+
+During page migrations, UBIFS might get confused and the following
+assert triggers:
+[ 213.480000] UBIFS assert failed in ubifs_set_page_dirty at 1451 (pid 436)
+[ 213.490000] CPU: 0 PID: 436 Comm: drm-stress-test Not tainted 4.4.4-00176-geaa802524636-dirty #1008
+[ 213.490000] Hardware name: Allwinner sun4i/sun5i Families
+[ 213.490000] [<c0015e70>] (unwind_backtrace) from [<c0012cdc>] (show_stack+0x10/0x14)
+[ 213.490000] [<c0012cdc>] (show_stack) from [<c02ad834>] (dump_stack+0x8c/0xa0)
+[ 213.490000] [<c02ad834>] (dump_stack) from [<c0236ee8>] (ubifs_set_page_dirty+0x44/0x50)
+[ 213.490000] [<c0236ee8>] (ubifs_set_page_dirty) from [<c00fa0bc>] (try_to_unmap_one+0x10c/0x3a8)
+[ 213.490000] [<c00fa0bc>] (try_to_unmap_one) from [<c00fadb4>] (rmap_walk+0xb4/0x290)
+[ 213.490000] [<c00fadb4>] (rmap_walk) from [<c00fb1bc>] (try_to_unmap+0x64/0x80)
+[ 213.490000] [<c00fb1bc>] (try_to_unmap) from [<c010dc28>] (migrate_pages+0x328/0x7a0)
+[ 213.490000] [<c010dc28>] (migrate_pages) from [<c00d0cb0>] (alloc_contig_range+0x168/0x2f4)
+[ 213.490000] [<c00d0cb0>] (alloc_contig_range) from [<c010ec00>] (cma_alloc+0x170/0x2c0)
+[ 213.490000] [<c010ec00>] (cma_alloc) from [<c001a958>] (__alloc_from_contiguous+0x38/0xd8)
+[ 213.490000] [<c001a958>] (__alloc_from_contiguous) from [<c001ad44>] (__dma_alloc+0x23c/0x274)
+[ 213.490000] [<c001ad44>] (__dma_alloc) from [<c001ae08>] (arm_dma_alloc+0x54/0x5c)
+[ 213.490000] [<c001ae08>] (arm_dma_alloc) from [<c035cecc>] (drm_gem_cma_create+0xb8/0xf0)
+[ 213.490000] [<c035cecc>] (drm_gem_cma_create) from [<c035cf20>] (drm_gem_cma_create_with_handle+0x1c/0xe8)
+[ 213.490000] [<c035cf20>] (drm_gem_cma_create_with_handle) from [<c035d088>] (drm_gem_cma_dumb_create+0x3c/0x48)
+[ 213.490000] [<c035d088>] (drm_gem_cma_dumb_create) from [<c0341ed8>] (drm_ioctl+0x12c/0x444)
+[ 213.490000] [<c0341ed8>] (drm_ioctl) from [<c0121adc>] (do_vfs_ioctl+0x3f4/0x614)
+[ 213.490000] [<c0121adc>] (do_vfs_ioctl) from [<c0121d30>] (SyS_ioctl+0x34/0x5c)
+[ 213.490000] [<c0121d30>] (SyS_ioctl) from [<c000f2c0>] (ret_fast_syscall+0x0/0x34)
+
+UBIFS uses PagePrivate(), which can have different meanings across
+filesystems. Therefore the generic page migration code cannot handle this
+case correctly.
+
+We have to implement our own migration function, which basically does a
+plain copy but also duplicates the page private flag.
+
+UBIFS is not a block device filesystem and cannot use buffer_migrate_page().
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+[rw: Massaged changelog, build fixes, etc...]
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Acked-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
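+The ordering in the new hook matters: move the mapping first, hand over
+PG_private by hand (the generic code does not know what UBIFS means by
+it), then copy the data. A userspace model of that order, with a fake
+page structure standing in for struct page:
+
+  #include <stdio.h>
+  #include <string.h>
+
+  #define PG_PRIVATE 0x1u
+
+  struct fake_page {
+          unsigned int flags;
+          char data[32];
+  };
+
+  static void migrate(struct fake_page *newp, struct fake_page *oldp)
+  {
+          /* 1. (mapping move elided) 2. duplicate the private flag... */
+          if (oldp->flags & PG_PRIVATE) {
+                  oldp->flags &= ~PG_PRIVATE;
+                  newp->flags |= PG_PRIVATE;
+          }
+          /* ...3. then a plain copy, as migrate_page_copy() would do */
+          memcpy(newp->data, oldp->data, sizeof(newp->data));
+  }
+
+  int main(void)
+  {
+          struct fake_page oldp = { PG_PRIVATE, "dirty ubifs data" };
+          struct fake_page newp = { 0, "" };
+
+          migrate(&newp, &oldp);
+          printf("old=%u new=%u data=%s\n", oldp.flags, newp.flags, newp.data);
+          return 0;
+  }
+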
+ fs/ubifs/file.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -52,6 +52,7 @@
+ #include "ubifs.h"
+ #include <linux/mount.h>
+ #include <linux/slab.h>
++#include <linux/migrate.h>
+
+ static int read_block(struct inode *inode, void *addr, unsigned int block,
+ struct ubifs_data_node *dn)
+@@ -1452,6 +1453,26 @@ static int ubifs_set_page_dirty(struct p
+ return ret;
+ }
+
++#ifdef CONFIG_MIGRATION
++static int ubifs_migrate_page(struct address_space *mapping,
++ struct page *newpage, struct page *page, enum migrate_mode mode)
++{
++ int rc;
++
++ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
++ if (rc != MIGRATEPAGE_SUCCESS)
++ return rc;
++
++ if (PagePrivate(page)) {
++ ClearPagePrivate(page);
++ SetPagePrivate(newpage);
++ }
++
++ migrate_page_copy(newpage, page);
++ return MIGRATEPAGE_SUCCESS;
++}
++#endif
++
+ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
+ {
+ /*
+@@ -1591,6 +1612,9 @@ const struct address_space_operations ub
+ .write_end = ubifs_write_end,
+ .invalidatepage = ubifs_invalidatepage,
+ .set_page_dirty = ubifs_set_page_dirty,
++#ifdef CONFIG_MIGRATION
++ .migratepage = ubifs_migrate_page,
++#endif
+ .releasepage = ubifs_releasepage,
+ };
+