From: Greg Kroah-Hartman
Date: Mon, 11 Jul 2016 23:37:10 +0000 (-0700)
Subject: 4.4-stable patches
X-Git-Tag: v4.6.5~35
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=ecf626b4b18ef5ffd77045732d026a7e35dc9a1e;p=thirdparty%2Fkernel%2Fstable-queue.git

4.4-stable patches

added patches:
	arm-8578-1-mm-ensure-pmd_present-only-checks-the-valid-bit.patch
	arm-8579-1-mm-fix-definition-of-pmd_mknotpresent.patch
	arm-imx6ul-fix-micrel-phy-mask.patch
	mips-kvm-fix-modular-kvm-under-qemu.patch
	ubifs-implement-migratepage.patch
---

diff --git a/queue-4.4/arm-8578-1-mm-ensure-pmd_present-only-checks-the-valid-bit.patch b/queue-4.4/arm-8578-1-mm-ensure-pmd_present-only-checks-the-valid-bit.patch
new file mode 100644
index 00000000000..af17abf2cde
--- /dev/null
+++ b/queue-4.4/arm-8578-1-mm-ensure-pmd_present-only-checks-the-valid-bit.patch
@@ -0,0 +1,66 @@
+From 624531886987f0f1b5d01fb598034d039198e090 Mon Sep 17 00:00:00 2001
+From: Will Deacon
+Date: Tue, 7 Jun 2016 17:57:54 +0100
+Subject: ARM: 8578/1: mm: ensure pmd_present only checks the valid bit
+
+From: Will Deacon
+
+commit 624531886987f0f1b5d01fb598034d039198e090 upstream.
+
+In a subsequent patch, pmd_mknotpresent will clear the valid bit of the
+pmd entry, resulting in a not-present entry from the hardware's
+perspective. Unfortunately, pmd_present simply checks for a non-zero pmd
+value and will therefore continue to return true even after a
+pmd_mknotpresent operation. Since pmd_mknotpresent is only used for
+managing huge entries, this is only an issue for the 3-level case.
+
+This patch fixes the 3-level pmd_present implementation to take into
+account the valid bit. For bisectability, the change is made before the
+fix to pmd_mknotpresent.
+
+[catalin.marinas@arm.com: comment update regarding pmd_mknotpresent patch]
+
+Fixes: 8d9625070073 ("ARM: mm: Transparent huge page support for LPAE systems.")
+Cc: Russell King
+Cc: Steve Capper
+Signed-off-by: Will Deacon
+Signed-off-by: Catalin Marinas
+Signed-off-by: Russell King
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm/include/asm/pgtable-2level.h | 1 +
+ arch/arm/include/asm/pgtable-3level.h | 1 +
+ arch/arm/include/asm/pgtable.h        | 1 -
+ 3 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *p
+ 
+ #define pmd_large(pmd)		(pmd_val(pmd) & 2)
+ #define pmd_bad(pmd)		(pmd_val(pmd) & 2)
++#define pmd_present(pmd)	(pmd_val(pmd))
+ 
+ #define copy_pmd(pmdpd,pmdps)		\
+ 	do {	\
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -212,6 +212,7 @@ static inline pmd_t *pmd_offset(pud_t *p
+ 						: !!(pmd_val(pmd) & (val)))
+ #define pmd_isclear(pmd, val)	(!(pmd_val(pmd) & (val)))
+ 
++#define pmd_present(pmd)	(pmd_isset((pmd), L_PMD_SECT_VALID))
+ #define pmd_young(pmd)		(pmd_isset((pmd), PMD_SECT_AF))
+ #define pte_special(pte)	(pte_isset((pte), L_PTE_SPECIAL))
+ static inline pte_t pte_mkspecial(pte_t pte)
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD
+ #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
+ 
+ #define pmd_none(pmd)		(!pmd_val(pmd))
+-#define pmd_present(pmd)	(pmd_val(pmd))
+ 
+ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+ {
diff --git a/queue-4.4/arm-8579-1-mm-fix-definition-of-pmd_mknotpresent.patch b/queue-4.4/arm-8579-1-mm-fix-definition-of-pmd_mknotpresent.patch
new file mode 100644
index 00000000000..6718332b4a8
--- /dev/null
+++ b/queue-4.4/arm-8579-1-mm-fix-definition-of-pmd_mknotpresent.patch
@@ -0,0 +1,51 @@
+From 56530f5d2ddc9b9fade7ef8db9cb886e9dc689b5 Mon Sep 17 00:00:00 2001
+From: Steve Capper
+Date: Tue, 7 Jun 2016 17:58:06 +0100
+Subject: ARM: 8579/1: mm: Fix definition of pmd_mknotpresent
+
+From: Steve Capper
+
+commit 56530f5d2ddc9b9fade7ef8db9cb886e9dc689b5 upstream.
+
+Currently pmd_mknotpresent will use a zero entry to represent an
+invalidated pmd.
+
+Unfortunately this definition clashes with pmd_none, thus it is
+possible for a race condition to occur if zap_pmd_range sees pmd_none
+whilst __split_huge_pmd_locked is running too with pmdp_invalidate
+just called.
+
+This patch fixes the race condition by modifying pmd_mknotpresent to
+create non-zero faulting entries (as is done in other architectures),
+removing the ambiguity with pmd_none.
+
+[catalin.marinas@arm.com: using L_PMD_SECT_VALID instead of PMD_TYPE_SECT]
+
+Fixes: 8d9625070073 ("ARM: mm: Transparent huge page support for LPAE systems.")
+Reported-by: Kirill A. Shutemov
+Acked-by: Will Deacon
+Cc: Russell King
+Signed-off-by: Steve Capper
+Signed-off-by: Catalin Marinas
+Signed-off-by: Russell King
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm/include/asm/pgtable-3level.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -258,10 +258,10 @@ PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
+ #define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+ #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
+ 
+-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
++/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
+ static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+ {
+-	return __pmd(0);
++	return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
+ }
+ 
+ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
diff --git a/queue-4.4/arm-imx6ul-fix-micrel-phy-mask.patch b/queue-4.4/arm-imx6ul-fix-micrel-phy-mask.patch
new file mode 100644
index 00000000000..14f6a161a11
--- /dev/null
+++ b/queue-4.4/arm-imx6ul-fix-micrel-phy-mask.patch
@@ -0,0 +1,36 @@
+From 20c15226d1c73150c4d9107301cac5dda0b7f995 Mon Sep 17 00:00:00 2001
+From: Fabio Estevam
+Date: Wed, 11 May 2016 16:39:30 -0300
+Subject: ARM: imx6ul: Fix Micrel PHY mask
+
+From: Fabio Estevam
+
+commit 20c15226d1c73150c4d9107301cac5dda0b7f995 upstream.
+
+The value used for Micrel PHY mask is not correct. Use the
+MICREL_PHY_ID_MASK definition instead.
+
+Thanks to Jiri Luznicky for proposing the fix at
+https://community.freescale.com/thread/387739
+
+Fixes: 709bc0657fe6f9f55 ("ARM: imx6ul: add fec MAC refrence clock and phy fixup init")
+Signed-off-by: Fabio Estevam
+Reviewed-by: Andrew Lunn
+Signed-off-by: Shawn Guo
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm/mach-imx/mach-imx6ul.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/mach-imx/mach-imx6ul.c
++++ b/arch/arm/mach-imx/mach-imx6ul.c
+@@ -46,7 +46,7 @@ static int ksz8081_phy_fixup(struct phy_
+ static void __init imx6ul_enet_phy_init(void)
+ {
+ 	if (IS_BUILTIN(CONFIG_PHYLIB))
+-		phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff,
++		phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK,
+ 					   ksz8081_phy_fixup);
+ }
+ 
diff --git a/queue-4.4/mips-kvm-fix-modular-kvm-under-qemu.patch b/queue-4.4/mips-kvm-fix-modular-kvm-under-qemu.patch
new file mode 100644
index 00000000000..2d64377873f
--- /dev/null
+++ b/queue-4.4/mips-kvm-fix-modular-kvm-under-qemu.patch
@@ -0,0 +1,104 @@
+From 797179bc4fe06c89e47a9f36f886f68640b423f8 Mon Sep 17 00:00:00 2001
+From: James Hogan
+Date: Thu, 9 Jun 2016 10:50:43 +0100
+Subject: MIPS: KVM: Fix modular KVM under QEMU
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: James Hogan
+
+commit 797179bc4fe06c89e47a9f36f886f68640b423f8 upstream.
+
+Copy __kvm_mips_vcpu_run() into unmapped memory, so that we can never
+get a TLB refill exception in it when KVM is built as a module.
+
+This was observed to happen with the host MIPS kernel running under
+QEMU, due to a not entirely transparent optimisation in the QEMU TLB
+handling where TLB entries replaced with TLBWR are copied to a separate
+part of the TLB array. Code in those pages continue to be executable,
+but those mappings persist only until the next ASID switch, even if
+they are marked global.
+
+An ASID switch happens in __kvm_mips_vcpu_run() at exception level
+after switching to the guest exception base. Subsequent TLB mapped
+kernel instructions just prior to switching to the guest trigger a TLB
+refill exception, which enters the guest exception handlers without
+updating EPC. This appears as a guest triggered TLB refill on a host
+kernel mapped (host KSeg2) address, which is not handled correctly as
+user (guest) mode accesses to kernel (host) segments always generate
+address error exceptions.
+
+Signed-off-by: James Hogan
+Cc: Paolo Bonzini
+Cc: Radim Krčmář
+Cc: Ralf Baechle
+Cc: kvm@vger.kernel.org
+Cc: linux-mips@linux-mips.org
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/mips/include/asm/kvm_host.h | 1 +
+ arch/mips/kvm/interrupt.h        | 1 +
+ arch/mips/kvm/locore.S           | 1 +
+ arch/mips/kvm/mips.c             | 11 ++++++++++-
+ 4 files changed, 13 insertions(+), 1 deletion(-)
+
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -372,6 +372,7 @@ struct kvm_mips_tlb {
+ #define KVM_MIPS_GUEST_TLB_SIZE	64
+ struct kvm_vcpu_arch {
+ 	void *host_ebase, *guest_ebase;
++	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ 	unsigned long host_stack;
+ 	unsigned long host_gp;
+ 
+--- a/arch/mips/kvm/interrupt.h
++++ b/arch/mips/kvm/interrupt.h
+@@ -28,6 +28,7 @@
+ #define MIPS_EXC_MAX		12
+ /* XXXSL More to follow */
+ 
++extern char __kvm_mips_vcpu_run_end[];
+ extern char mips32_exception[], mips32_exceptionEnd[];
+ extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
+ 
+--- a/arch/mips/kvm/locore.S
++++ b/arch/mips/kvm/locore.S
+@@ -227,6 +227,7 @@ FEXPORT(__kvm_mips_load_k0k1)
+ 
+ 	/* Jump to guest */
+ 	eret
++EXPORT(__kvm_mips_vcpu_run_end)
+ 
+ VECTOR(MIPSX(exception), unknown)
+ /* Find out what mode we came from and jump to the proper handler. */
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -314,6 +314,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(st
+ 	memcpy(gebase + offset, mips32_GuestException,
+ 	       mips32_GuestExceptionEnd - mips32_GuestException);
+ 
++#ifdef MODULE
++	offset += mips32_GuestExceptionEnd - mips32_GuestException;
++	memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
++	       __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
++	vcpu->arch.vcpu_run = gebase + offset;
++#else
++	vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
++#endif
++
+ 	/* Invalidate the icache for these ranges */
+ 	local_flush_icache_range((unsigned long)gebase,
+ 				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
+@@ -403,7 +412,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+ 	/* Disable hardware page table walking while in guest */
+ 	htw_stop();
+ 
+-	r = __kvm_mips_vcpu_run(run, vcpu);
++	r = vcpu->arch.vcpu_run(run, vcpu);
+ 
+ 	/* Re-enable HTW before enabling interrupts */
+ 	htw_start();
diff --git a/queue-4.4/series b/queue-4.4/series
index 4b7cb2dba66..f8c94fc3738 100644
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -33,3 +33,8 @@ nfsd-extend-the-mutex-holding-region-around-in-nfsd4_process_open2.patch
 nfsd-check-permissions-when-setting-acls.patch
 make-nfs_atomic_open-call-d_drop-on-all-open_context-errors.patch
 nfs-fix-another-open_downgrade-bug.patch
+arm-imx6ul-fix-micrel-phy-mask.patch
+arm-8578-1-mm-ensure-pmd_present-only-checks-the-valid-bit.patch
+arm-8579-1-mm-fix-definition-of-pmd_mknotpresent.patch
+mips-kvm-fix-modular-kvm-under-qemu.patch
+ubifs-implement-migratepage.patch
diff --git a/queue-4.4/ubifs-implement-migratepage.patch b/queue-4.4/ubifs-implement-migratepage.patch
new file mode 100644
index 00000000000..41f04293f33
--- /dev/null
+++ b/queue-4.4/ubifs-implement-migratepage.patch
@@ -0,0 +1,98 @@
+From 4ac1c17b2044a1b4b2fbed74451947e905fc2992 Mon Sep 17 00:00:00 2001
+From: "Kirill A. Shutemov"
+Date: Thu, 16 Jun 2016 23:26:15 +0200
+Subject: UBIFS: Implement ->migratepage()
+
+From: Kirill A. Shutemov
+
+commit 4ac1c17b2044a1b4b2fbed74451947e905fc2992 upstream.
+
+During page migrations UBIFS might get confused
+and the following assert triggers:
+[  213.480000] UBIFS assert failed in ubifs_set_page_dirty at 1451 (pid 436)
+[  213.490000] CPU: 0 PID: 436 Comm: drm-stress-test Not tainted 4.4.4-00176-geaa802524636-dirty #1008
+[  213.490000] Hardware name: Allwinner sun4i/sun5i Families
+[  213.490000] [] (unwind_backtrace) from [] (show_stack+0x10/0x14)
+[  213.490000] [] (show_stack) from [] (dump_stack+0x8c/0xa0)
+[  213.490000] [] (dump_stack) from [] (ubifs_set_page_dirty+0x44/0x50)
+[  213.490000] [] (ubifs_set_page_dirty) from [] (try_to_unmap_one+0x10c/0x3a8)
+[  213.490000] [] (try_to_unmap_one) from [] (rmap_walk+0xb4/0x290)
+[  213.490000] [] (rmap_walk) from [] (try_to_unmap+0x64/0x80)
+[  213.490000] [] (try_to_unmap) from [] (migrate_pages+0x328/0x7a0)
+[  213.490000] [] (migrate_pages) from [] (alloc_contig_range+0x168/0x2f4)
+[  213.490000] [] (alloc_contig_range) from [] (cma_alloc+0x170/0x2c0)
+[  213.490000] [] (cma_alloc) from [] (__alloc_from_contiguous+0x38/0xd8)
+[  213.490000] [] (__alloc_from_contiguous) from [] (__dma_alloc+0x23c/0x274)
+[  213.490000] [] (__dma_alloc) from [] (arm_dma_alloc+0x54/0x5c)
+[  213.490000] [] (arm_dma_alloc) from [] (drm_gem_cma_create+0xb8/0xf0)
+[  213.490000] [] (drm_gem_cma_create) from [] (drm_gem_cma_create_with_handle+0x1c/0xe8)
+[  213.490000] [] (drm_gem_cma_create_with_handle) from [] (drm_gem_cma_dumb_create+0x3c/0x48)
+[  213.490000] [] (drm_gem_cma_dumb_create) from [] (drm_ioctl+0x12c/0x444)
+[  213.490000] [] (drm_ioctl) from [] (do_vfs_ioctl+0x3f4/0x614)
+[  213.490000] [] (do_vfs_ioctl) from [] (SyS_ioctl+0x34/0x5c)
+[  213.490000] [] (SyS_ioctl) from [] (ret_fast_syscall+0x0/0x34)
+
+UBIFS is using PagePrivate() which can have different meanings across
+filesystems. Therefore the generic page migration code cannot handle this
+case correctly.
+We have to implement our own migration function which basically does a
+plain copy but also duplicates the page private flag.
+UBIFS is not a block device filesystem and cannot use buffer_migrate_page().
+
+Signed-off-by: Kirill A. Shutemov
+[rw: Massaged changelog, build fixes, etc...]
+Signed-off-by: Richard Weinberger
+Acked-by: Christoph Hellwig
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ fs/ubifs/file.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -52,6 +52,7 @@
+ #include "ubifs.h"
+ #include
+ #include
++#include <linux/migrate.h>
+ 
+ static int read_block(struct inode *inode, void *addr, unsigned int block,
+ 		      struct ubifs_data_node *dn)
+@@ -1452,6 +1453,26 @@ static int ubifs_set_page_dirty(struct p
+ 	return ret;
+ }
+ 
++#ifdef CONFIG_MIGRATION
++static int ubifs_migrate_page(struct address_space *mapping,
++		struct page *newpage, struct page *page, enum migrate_mode mode)
++{
++	int rc;
++
++	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
++	if (rc != MIGRATEPAGE_SUCCESS)
++		return rc;
++
++	if (PagePrivate(page)) {
++		ClearPagePrivate(page);
++		SetPagePrivate(newpage);
++	}
++
++	migrate_page_copy(newpage, page);
++	return MIGRATEPAGE_SUCCESS;
++}
++#endif
++
+ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
+ {
+ 	/*
+@@ -1591,6 +1612,9 @@ const struct address_space_operations ub
+ 	.write_end = ubifs_write_end,
+ 	.invalidatepage = ubifs_invalidatepage,
+ 	.set_page_dirty = ubifs_set_page_dirty,
++#ifdef CONFIG_MIGRATION
++	.migratepage = ubifs_migrate_page,
++#endif
+ 	.releasepage = ubifs_releasepage,
+ };
+ 