From: Greg Kroah-Hartman
Date: Thu, 25 Nov 2021 13:09:12 +0000 (+0100)
Subject: update queue-4.4/hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch
X-Git-Tag: v5.10.82~3
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=8d72d216cf839f39d725319b5eb55433b1ae847e;p=thirdparty%2Fkernel%2Fstable-queue.git

update queue-4.4/hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch
---

diff --git a/queue-4.4/hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch b/queue-4.4/hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch
index 57fc0c1c41e..90a3a7a17f5 100644
--- a/queue-4.4/hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch
+++ b/queue-4.4/hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch
@@ -31,12 +31,114 @@ Cc: KAMEZAWA Hiroyuki
 Cc: Andrew Morton
 Signed-off-by: Linus Torvalds
 Signed-off-by: Greg Kroah-Hartman
-
 ---
- include/asm-generic/tlb.h |    7 +++++++
- mm/hugetlb.c              |    5 ++++-
- 2 files changed, 11 insertions(+), 1 deletion(-)
+ arch/arm/include/asm/tlb.h  |    8 ++++++++
+ arch/ia64/include/asm/tlb.h |   10 ++++++++++
+ arch/s390/include/asm/tlb.h |   13 +++++++++++++
+ arch/sh/include/asm/tlb.h   |   10 ++++++++++
+ arch/um/include/asm/tlb.h   |   12 ++++++++++++
+ include/asm-generic/tlb.h   |    7 +++++++
+ mm/hugetlb.c                |    5 ++++-
+ 7 files changed, 64 insertions(+), 1 deletion(-)
 
+--- a/arch/arm/include/asm/tlb.h
++++ b/arch/arm/include/asm/tlb.h
+@@ -257,6 +257,14 @@ tlb_remove_pmd_tlb_entry(struct mmu_gath
+ 	tlb_add_flush(tlb, addr);
+ }
+ 
++static inline void
++tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
++		    unsigned long size)
++{
++	tlb_add_flush(tlb, address);
++	tlb_add_flush(tlb, address + size - PMD_SIZE);
++}
++
+ #define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
+ #define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
+ #define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
+--- a/arch/ia64/include/asm/tlb.h
++++ b/arch/ia64/include/asm/tlb.h
+@@ -251,6 +251,16 @@ __tlb_remove_tlb_entry (struct mmu_gathe
+ 	tlb->end_addr = address + PAGE_SIZE;
+ }
+ 
++static inline void
++tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
++		    unsigned long size)
++{
++	if (tlb->start_addr > address)
++		tlb->start_addr = address;
++	if (tlb->end_addr < address + size)
++		tlb->end_addr = address + size;
++}
++
+ #define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)
+ 
+ #define tlb_start_vma(tlb, vma)			do { } while (0)
+--- a/arch/s390/include/asm/tlb.h
++++ b/arch/s390/include/asm/tlb.h
+@@ -97,6 +97,19 @@ static inline void tlb_remove_page(struc
+ {
+ 	free_page_and_swap_cache(page);
+ }
++static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
++				unsigned long address, unsigned long size)
++{
++	/*
++	 * the range might exceed the original range that was provided to
++	 * tlb_gather_mmu(), so we need to update it despite the fact it is
++	 * usually not updated.
++	 */
++	if (tlb->start > address)
++		tlb->start = address;
++	if (tlb->end < address + size)
++		tlb->end = address + size;
++}
+ 
+ /*
+  * pte_free_tlb frees a pte table and clears the CRSTE for the
+--- a/arch/sh/include/asm/tlb.h
++++ b/arch/sh/include/asm/tlb.h
+@@ -65,6 +65,16 @@ tlb_remove_tlb_entry(struct mmu_gather *
+ 	tlb->end = address + PAGE_SIZE;
+ }
+ 
++static inline void
++tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
++		    unsigned long size)
++{
++	if (tlb->start > address)
++		tlb->start = address;
++	if (tlb->end < address + size)
++		tlb->end = address + size;
++}
++
+ /*
+  * In the case of tlb vma handling, we can optimise these away in the
+  * case where we're doing a full MM flush. When we're doing a munmap,
+--- a/arch/um/include/asm/tlb.h
++++ b/arch/um/include/asm/tlb.h
+@@ -110,6 +110,18 @@ static inline void tlb_remove_page(struc
+ 	__tlb_remove_page(tlb, page);
+ }
+ 
++static inline void
++tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
++		    unsigned long size)
++{
++	tlb->need_flush = 1;
++
++	if (tlb->start > address)
++		tlb->start = address;
++	if (tlb->end < address + size)
++		tlb->end = address + size;
++}
++
+ /**
+  * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
+  *
 --- a/include/asm-generic/tlb.h
 +++ b/include/asm-generic/tlb.h
 @@ -165,6 +165,13 @@ static inline void __tlb_reset_range(str