--- /dev/null
+From 39307f8ee3539478c28e71b4909b5b028cce14b1 Mon Sep 17 00:00:00 2001
+From: Daniel Rosenberg <drosen@google.com>
+Date: Thu, 3 Jun 2021 09:50:37 +0000
+Subject: f2fs: Show casefolding support only when supported
+
+From: Daniel Rosenberg <drosen@google.com>
+
+commit 39307f8ee3539478c28e71b4909b5b028cce14b1 upstream.
+
+The casefolding feature is only supported when CONFIG_UNICODE is set.
+This modifies the feature list f2fs presents under sysfs accordingly.
+
+Fixes: 5aba54302a46 ("f2fs: include charset encoding information in the superblock")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Daniel Rosenberg <drosen@google.com>
+Reviewed-by: Eric Biggers <ebiggers@google.com>
+Reviewed-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/f2fs/sysfs.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -612,7 +612,9 @@ F2FS_FEATURE_RO_ATTR(lost_found, FEAT_LO
+ F2FS_FEATURE_RO_ATTR(verity, FEAT_VERITY);
+ #endif
+ F2FS_FEATURE_RO_ATTR(sb_checksum, FEAT_SB_CHECKSUM);
++#ifdef CONFIG_UNICODE
+ F2FS_FEATURE_RO_ATTR(casefold, FEAT_CASEFOLD);
++#endif
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+ F2FS_FEATURE_RO_ATTR(compression, FEAT_COMPRESSION);
+ #endif
+@@ -700,7 +702,9 @@ static struct attribute *f2fs_feat_attrs
+ ATTR_LIST(verity),
+ #endif
+ ATTR_LIST(sb_checksum),
++#ifdef CONFIG_UNICODE
+ ATTR_LIST(casefold),
++#endif
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+ ATTR_LIST(compression),
+ #endif
--- /dev/null
+From 122e093c1734361dedb64f65c99b93e28e4624f4 Mon Sep 17 00:00:00 2001
+From: Mike Rapoport <rppt@linux.ibm.com>
+Date: Mon, 28 Jun 2021 19:33:26 -0700
+Subject: mm/page_alloc: fix memory map initialization for descending nodes
+
+From: Mike Rapoport <rppt@linux.ibm.com>
+
+commit 122e093c1734361dedb64f65c99b93e28e4624f4 upstream.
+
+On systems with memory nodes sorted in descending order, for instance the
+Dell Precision WorkStation T5500, the struct pages for higher PFNs (and,
+respectively, lower nodes) could be overwritten by the initialization of
+struct pages corresponding to the holes in the memory sections.
+
+For example, with the memory layout below,
+
+[ 0.245624] Early memory node ranges
+[ 0.248496] node 1: [mem 0x0000000000001000-0x0000000000090fff]
+[ 0.251376] node 1: [mem 0x0000000000100000-0x00000000dbdf8fff]
+[ 0.254256] node 1: [mem 0x0000000100000000-0x0000001423ffffff]
+[ 0.257144] node 0: [mem 0x0000001424000000-0x0000002023ffffff]
+
+the range 0x1424000000 - 0x1428000000 at the beginning of node 0 starts in
+the middle of a section and will be considered a hole during the
+initialization of the last section in node 1.
+
+The wrong initialization of the memory map causes panic on boot when
+CONFIG_DEBUG_VM is enabled.
+
+Reorder the loops in the memory map initialization so that the outer loop
+always iterates over populated memory regions in ascending order and the
+inner loop selects the zone corresponding to the PFN range.
+
+This way, initialization of the struct pages for the memory holes is
+always done for ranges that are actually not populated.
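+
+In outline, the reordered traversal looks like this (a condensed sketch of
+the mm/page_alloc.c hunks below, not the literal code):
+
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+		struct pglist_data *node = NODE_DATA(nid);
+
+		for (j = 0; j < MAX_NR_ZONES; j++) {
+			struct zone *zone = node->node_zones + j;
+
+			if (!populated_zone(zone))
+				continue;
+
+			/* clamp [start_pfn, end_pfn) to the zone and init it */
+			memmap_init_zone_range(zone, start_pfn, end_pfn,
+					       &hole_pfn);
+		}
+	}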
+
+[akpm@linux-foundation.org: coding style fixes]
+
+Link: https://lkml.kernel.org/r/YNXlMqBbL+tBG7yq@kernel.org
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=213073
+Link: https://lkml.kernel.org/r/20210624062305.10940-1-rppt@kernel.org
+Fixes: 0740a50b9baa ("mm/page_alloc.c: refactor initialization of struct page for holes in memory layout")
+Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
+Cc: Boris Petkov <bp@alien8.de>
+Cc: Robert Shteynfeld <robert.shteynfeld@gmail.com>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[rppt: tweak for compatibility with IA64's override of memmap_init]
+Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/ia64/include/asm/pgtable.h | 5 +
+ arch/ia64/mm/init.c | 6 +-
+ mm/page_alloc.c | 106 +++++++++++++++++++++++++---------------
+ 3 files changed, 75 insertions(+), 42 deletions(-)
+
+--- a/arch/ia64/include/asm/pgtable.h
++++ b/arch/ia64/include/asm/pgtable.h
+@@ -520,8 +520,9 @@ extern struct page *zero_page_memmap_ptr
+
+ # ifdef CONFIG_VIRTUAL_MEM_MAP
+ /* arch mem_map init routine is needed due to holes in a virtual mem_map */
+- extern void memmap_init (unsigned long size, int nid, unsigned long zone,
+- unsigned long start_pfn);
++void memmap_init(void);
++void arch_memmap_init(unsigned long size, int nid, unsigned long zone,
++ unsigned long start_pfn);
+ # endif /* CONFIG_VIRTUAL_MEM_MAP */
+ # endif /* !__ASSEMBLY__ */
+
+--- a/arch/ia64/mm/init.c
++++ b/arch/ia64/mm/init.c
+@@ -542,7 +542,7 @@ virtual_memmap_init(u64 start, u64 end,
+ }
+
+ void __meminit
+-memmap_init (unsigned long size, int nid, unsigned long zone,
++arch_memmap_init (unsigned long size, int nid, unsigned long zone,
+ unsigned long start_pfn)
+ {
+ if (!vmem_map) {
+@@ -562,6 +562,10 @@ memmap_init (unsigned long size, int nid
+ }
+ }
+
++void __init memmap_init(void)
++{
++}
++
+ int
+ ia64_pfn_valid (unsigned long pfn)
+ {
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6129,7 +6129,7 @@ void __ref memmap_init_zone_device(struc
+ return;
+
+ /*
+- * The call to memmap_init_zone should have already taken care
++ * The call to memmap_init should have already taken care
+ * of the pages reserved for the memmap, so we can just jump to
+ * the end of that region and start processing the device pages.
+ */
+@@ -6194,7 +6194,7 @@ static void __meminit zone_init_free_lis
+ /*
+ * Only struct pages that correspond to ranges defined by memblock.memory
+ * are zeroed and initialized by going through __init_single_page() during
+- * memmap_init_zone().
++ * memmap_init_zone_range().
+ *
+ * But, there could be struct pages that correspond to holes in
+ * memblock.memory. This can happen because of the following reasons:
+@@ -6213,9 +6213,9 @@ static void __meminit zone_init_free_lis
+ * zone/node above the hole except for the trailing pages in the last
+ * section that will be appended to the zone/node below.
+ */
+-static u64 __meminit init_unavailable_range(unsigned long spfn,
+- unsigned long epfn,
+- int zone, int node)
++static void __init init_unavailable_range(unsigned long spfn,
++ unsigned long epfn,
++ int zone, int node)
+ {
+ unsigned long pfn;
+ u64 pgcnt = 0;
+@@ -6231,58 +6231,84 @@ static u64 __meminit init_unavailable_ra
+ pgcnt++;
+ }
+
+- return pgcnt;
++ if (pgcnt)
++ pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
++ node, zone_names[zone], pgcnt);
+ }
+ #else
+-static inline u64 init_unavailable_range(unsigned long spfn, unsigned long epfn,
+- int zone, int node)
++static inline void init_unavailable_range(unsigned long spfn,
++ unsigned long epfn,
++ int zone, int node)
+ {
+- return 0;
+ }
+ #endif
+
+-void __meminit __weak memmap_init(unsigned long size, int nid,
+- unsigned long zone,
+- unsigned long range_start_pfn)
++static void __init memmap_init_zone_range(struct zone *zone,
++ unsigned long start_pfn,
++ unsigned long end_pfn,
++ unsigned long *hole_pfn)
++{
++ unsigned long zone_start_pfn = zone->zone_start_pfn;
++ unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
++ int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
++
++ start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
++ end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
++
++ if (start_pfn >= end_pfn)
++ return;
++
++ memmap_init_zone(end_pfn - start_pfn, nid, zone_id, start_pfn,
++ zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
++
++ if (*hole_pfn < start_pfn)
++ init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
++
++ *hole_pfn = end_pfn;
++}
++
++void __init __weak memmap_init(void)
+ {
+- static unsigned long hole_pfn;
+ unsigned long start_pfn, end_pfn;
+- unsigned long range_end_pfn = range_start_pfn + size;
+- int i;
+- u64 pgcnt = 0;
++ unsigned long hole_pfn = 0;
++ int i, j, zone_id, nid;
+
+- for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+- start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
+- end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
++ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
++ struct pglist_data *node = NODE_DATA(nid);
+
+- if (end_pfn > start_pfn) {
+- size = end_pfn - start_pfn;
+- memmap_init_zone(size, nid, zone, start_pfn, range_end_pfn,
+- MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
+- }
++ for (j = 0; j < MAX_NR_ZONES; j++) {
++ struct zone *zone = node->node_zones + j;
++
++ if (!populated_zone(zone))
++ continue;
+
+- if (hole_pfn < start_pfn)
+- pgcnt += init_unavailable_range(hole_pfn, start_pfn,
+- zone, nid);
+- hole_pfn = end_pfn;
++ memmap_init_zone_range(zone, start_pfn, end_pfn,
++ &hole_pfn);
++ zone_id = j;
++ }
+ }
+
+ #ifdef CONFIG_SPARSEMEM
+ /*
+- * Initialize the hole in the range [zone_end_pfn, section_end].
+- * If zone boundary falls in the middle of a section, this hole
+- * will be re-initialized during the call to this function for the
+- * higher zone.
++ * Initialize the memory map for hole in the range [memory_end,
++ * section_end].
++ * Append the pages in this hole to the highest zone in the last
++ * node.
++ * The call to init_unavailable_range() is outside the ifdef to
++ * silence the compiler warning about zone_id set but not used;
++ * for FLATMEM it is a nop anyway
+ */
+- end_pfn = round_up(range_end_pfn, PAGES_PER_SECTION);
++ end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
+ if (hole_pfn < end_pfn)
+- pgcnt += init_unavailable_range(hole_pfn, end_pfn,
+- zone, nid);
+ #endif
++ init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
++}
+
+- if (pgcnt)
+- pr_info(" %s zone: %llu pages in unavailable ranges\n",
+- zone_names[zone], pgcnt);
++/* A stub for backwards compatibility with custom implementation on IA-64 */
++void __meminit __weak arch_memmap_init(unsigned long size, int nid,
++ unsigned long zone,
++ unsigned long range_start_pfn)
++{
+ }
+
+ static int zone_batchsize(struct zone *zone)
+@@ -6981,7 +7007,7 @@ static void __init free_area_init_core(s
+ set_pageblock_order();
+ setup_usemap(pgdat, zone, zone_start_pfn, size);
+ init_currently_empty_zone(zone, zone_start_pfn, size);
+- memmap_init(size, nid, j, zone_start_pfn);
++ arch_memmap_init(size, nid, j, zone_start_pfn);
+ }
+ }
+
+@@ -7507,6 +7533,8 @@ void __init free_area_init(unsigned long
+ node_set_state(nid, N_MEMORY);
+ check_for_memory(pgdat, nid);
+ }
++
++ memmap_init();
+ }
+
+ static int __init cmdline_parse_core(char *p, unsigned long *core,
--- /dev/null
+From 5fc7a5f6fd04bc18f309d9f979b32ef7d1d0a997 Mon Sep 17 00:00:00 2001
+From: Peter Xu <peterx@redhat.com>
+Date: Wed, 30 Jun 2021 18:48:59 -0700
+Subject: mm/thp: simplify copying of huge zero page pmd when fork
+
+From: Peter Xu <peterx@redhat.com>
+
+commit 5fc7a5f6fd04bc18f309d9f979b32ef7d1d0a997 upstream.
+
+Patch series "mm/uffd: Misc fix for uffd-wp and one more test".
+
+This series fixes some corner-case bugs for uffd-wp on either thp or
+fork(), and then introduces a new test with pagemap/pageout.
+
+Patch layout:
+
+Patch 1: cleanup for THP; it slightly simplifies the follow-up patches
+Patches 2-4: misc fixes for uffd-wp here and there; please refer to each patch
+Patch 5: add pagemap support for uffd-wp
+Patch 6: add pagemap/pageout test for uffd-wp
+
+The last test can also verify some of the fixes in the previous patches,
+as it will fail without them. It's not easy to verify all the changes in
+patches 2-4, but hopefully they can still be properly reviewed.
+
+Note that, considering the ongoing uffd-wp shmem & hugetlbfs work, patch 5
+is incomplete as it's missing e.g. the hugetlbfs part and the special swap
+pte detection. However, that's not needed in this series, and since that
+work is still under review, this series does not depend on it (the last
+test only runs with anonymous memory, not file-backed). So this series can
+be merged even before that one.
+
+This patch (of 6):
+
+The huge zero page is handled in a special path in copy_huge_pmd(); however,
+it should share most of its code with a normal thp page. Share more code
+with it by removing the special path. The only leftover so far is the
+huge zero page refcounting (mm_get_huge_zero_page()), because that is
+done separately with a global counter.
+
+This prepares for a future patch that modifies the huge pmd to be
+installed, so that we don't need to duplicate it explicitly for the huge
+zero page case too.
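+
+Condensed from the mm/huge_memory.c hunks below, the zero-page path now
+boils down to sharing the common tail:
+
+	if (is_huge_zero_pmd(pmd)) {
+		/* only the global zero-page refcount stays special */
+		mm_get_huge_zero_page(dst_mm);
+		goto out_zero_page;
+	}
+	...
+out_zero_page:
+	mm_inc_nr_ptes(dst_mm);
+	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);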
+
+Link: https://lkml.kernel.org/r/20210428225030.9708-1-peterx@redhat.com
+Link: https://lkml.kernel.org/r/20210428225030.9708-2-peterx@redhat.com
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Cc: Kirill A. Shutemov <kirill@shutemov.name>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Jerome Glisse <jglisse@redhat.com>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: Brian Geffon <bgeffon@google.com>
+Cc: "Dr . David Alan Gilbert" <dgilbert@redhat.com>
+Cc: Joe Perches <joe@perches.com>
+Cc: Lokesh Gidra <lokeshgidra@google.com>
+Cc: Mina Almasry <almasrymina@google.com>
+Cc: Oliver Upton <oupton@google.com>
+Cc: Shaohua Li <shli@fb.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Stephen Rothwell <sfr@canb.auug.org.au>
+Cc: Wang Qing <wangqing@vivo.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/huge_memory.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1074,17 +1074,13 @@ int copy_huge_pmd(struct mm_struct *dst_
+ * a page table.
+ */
+ if (is_huge_zero_pmd(pmd)) {
+- struct page *zero_page;
+ /*
+ * get_huge_zero_page() will never allocate a new page here,
+ * since we already have a zero page to copy. It just takes a
+ * reference.
+ */
+- zero_page = mm_get_huge_zero_page(dst_mm);
+- set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
+- zero_page);
+- ret = 0;
+- goto out_unlock;
++ mm_get_huge_zero_page(dst_mm);
++ goto out_zero_page;
+ }
+
+ src_page = pmd_page(pmd);
+@@ -1110,6 +1106,7 @@ int copy_huge_pmd(struct mm_struct *dst_
+ get_page(src_page);
+ page_dup_rmap(src_page, true);
+ add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
++out_zero_page:
+ mm_inc_nr_ptes(dst_mm);
+ pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
+
--- /dev/null
+From 8f34f1eac3820fc2722e5159acceb22545b30b0d Mon Sep 17 00:00:00 2001
+From: Peter Xu <peterx@redhat.com>
+Date: Wed, 30 Jun 2021 18:49:02 -0700
+Subject: mm/userfaultfd: fix uffd-wp special cases for fork()
+
+From: Peter Xu <peterx@redhat.com>
+
+commit 8f34f1eac3820fc2722e5159acceb22545b30b0d upstream.
+
+We tried to do something similar in b569a1760782 ("userfaultfd: wp: drop
+_PAGE_UFFD_WP properly when fork") previously, but it didn't get everything
+right. A few fixes around the code path:
+
+1. We were referencing VM_UFFD_WP vm_flags on the _old_ vma rather
+   than the new vma. That was overlooked in b569a1760782, so it won't work
+   as expected. Thanks to the recent rework of the fork code
+   (7a4830c380f3a8b3), we can easily get the new vma now, so switch the
+   checks to that.
+
+2. Dropping the uffd-wp bit in copy_huge_pmd() could be wrong if the
+ huge pmd is a migration huge pmd. When it happens, instead of using
+ pmd_uffd_wp(), we should use pmd_swp_uffd_wp(). The fix is simply to
+ handle them separately.
+
+3. We forgot to carry over the uffd-wp bit for a write migration huge pmd
+   entry. This also happens in copy_huge_pmd(), where we convert a
+   write huge migration entry into a read one.
+
+4. In copy_nonpresent_pte(), drop uffd-wp if necessary for swap ptes.
+
+5. In copy_present_page(), when COW is enforced during fork(), we also
+   need to pass over the uffd-wp bit if VM_UFFD_WP is armed on the new
+   vma and the pte to be copied has the uffd-wp bit set.
+
+Remove the comment in copy_present_pte() about this. Commenting only there
+would not help much, and commenting everywhere would be overkill. Let's
+assume the commit messages will do.
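+
+The recurring pattern, condensed from the copy_present_pte() hunk below
+(the swap pte and pmd paths use the corresponding _swp_ helpers), keys the
+decision on the destination vma:
+
+	if (!userfaultfd_wp(dst_vma))
+		pte = pte_clear_uffd_wp(pte);
+
+	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);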
+
+[peterx@redhat.com: fix a few thp pmd missing uffd-wp bit]
+ Link: https://lkml.kernel.org/r/20210428225030.9708-4-peterx@redhat.com
+
+Link: https://lkml.kernel.org/r/20210428225030.9708-3-peterx@redhat.com
+Fixes: b569a1760782f ("userfaultfd: wp: drop _PAGE_UFFD_WP properly when fork")
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Cc: Jerome Glisse <jglisse@redhat.com>
+Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: Brian Geffon <bgeffon@google.com>
+Cc: "Dr . David Alan Gilbert" <dgilbert@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Joe Perches <joe@perches.com>
+Cc: Kirill A. Shutemov <kirill@shutemov.name>
+Cc: Lokesh Gidra <lokeshgidra@google.com>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Mina Almasry <almasrymina@google.com>
+Cc: Oliver Upton <oupton@google.com>
+Cc: Shaohua Li <shli@fb.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Stephen Rothwell <sfr@canb.auug.org.au>
+Cc: Wang Qing <wangqing@vivo.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/huge_mm.h | 2 +-
+ include/linux/swapops.h | 2 ++
+ mm/huge_memory.c | 27 ++++++++++++++-------------
+ mm/memory.c | 25 +++++++++++++------------
+ 4 files changed, 30 insertions(+), 26 deletions(-)
+
+--- a/include/linux/huge_mm.h
++++ b/include/linux/huge_mm.h
+@@ -10,7 +10,7 @@
+ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
+ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+- struct vm_area_struct *vma);
++ struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
+ void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
+ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -265,6 +265,8 @@ static inline swp_entry_t pmd_to_swp_ent
+
+ if (pmd_swp_soft_dirty(pmd))
+ pmd = pmd_swp_clear_soft_dirty(pmd);
++ if (pmd_swp_uffd_wp(pmd))
++ pmd = pmd_swp_clear_uffd_wp(pmd);
+ arch_entry = __pmd_to_swp_entry(pmd);
+ return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+ }
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1012,7 +1012,7 @@ struct page *follow_devmap_pmd(struct vm
+
+ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+- struct vm_area_struct *vma)
++ struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
+ {
+ spinlock_t *dst_ptl, *src_ptl;
+ struct page *src_page;
+@@ -1021,7 +1021,7 @@ int copy_huge_pmd(struct mm_struct *dst_
+ int ret = -ENOMEM;
+
+ /* Skip if can be re-fill on fault */
+- if (!vma_is_anonymous(vma))
++ if (!vma_is_anonymous(dst_vma))
+ return 0;
+
+ pgtable = pte_alloc_one(dst_mm);
+@@ -1035,14 +1035,6 @@ int copy_huge_pmd(struct mm_struct *dst_
+ ret = -EAGAIN;
+ pmd = *src_pmd;
+
+- /*
+- * Make sure the _PAGE_UFFD_WP bit is cleared if the new VMA
+- * does not have the VM_UFFD_WP, which means that the uffd
+- * fork event is not enabled.
+- */
+- if (!(vma->vm_flags & VM_UFFD_WP))
+- pmd = pmd_clear_uffd_wp(pmd);
+-
+ #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+ if (unlikely(is_swap_pmd(pmd))) {
+ swp_entry_t entry = pmd_to_swp_entry(pmd);
+@@ -1053,11 +1045,15 @@ int copy_huge_pmd(struct mm_struct *dst_
+ pmd = swp_entry_to_pmd(entry);
+ if (pmd_swp_soft_dirty(*src_pmd))
+ pmd = pmd_swp_mksoft_dirty(pmd);
++ if (pmd_swp_uffd_wp(*src_pmd))
++ pmd = pmd_swp_mkuffd_wp(pmd);
+ set_pmd_at(src_mm, addr, src_pmd, pmd);
+ }
+ add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+ mm_inc_nr_ptes(dst_mm);
+ pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
++ if (!userfaultfd_wp(dst_vma))
++ pmd = pmd_swp_clear_uffd_wp(pmd);
+ set_pmd_at(dst_mm, addr, dst_pmd, pmd);
+ ret = 0;
+ goto out_unlock;
+@@ -1093,13 +1089,13 @@ int copy_huge_pmd(struct mm_struct *dst_
+ * best effort that the pinned pages won't be replaced by another
+ * random page during the coming copy-on-write.
+ */
+- if (unlikely(is_cow_mapping(vma->vm_flags) &&
++ if (unlikely(is_cow_mapping(src_vma->vm_flags) &&
+ atomic_read(&src_mm->has_pinned) &&
+ page_maybe_dma_pinned(src_page))) {
+ pte_free(dst_mm, pgtable);
+ spin_unlock(src_ptl);
+ spin_unlock(dst_ptl);
+- __split_huge_pmd(vma, src_pmd, addr, false, NULL);
++ __split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
+ return -EAGAIN;
+ }
+
+@@ -1109,8 +1105,9 @@ int copy_huge_pmd(struct mm_struct *dst_
+ out_zero_page:
+ mm_inc_nr_ptes(dst_mm);
+ pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
+-
+ pmdp_set_wrprotect(src_mm, addr, src_pmd);
++ if (!userfaultfd_wp(dst_vma))
++ pmd = pmd_clear_uffd_wp(pmd);
+ pmd = pmd_mkold(pmd_wrprotect(pmd));
+ set_pmd_at(dst_mm, addr, dst_pmd, pmd);
+
+@@ -1829,6 +1826,8 @@ int change_huge_pmd(struct vm_area_struc
+ newpmd = swp_entry_to_pmd(entry);
+ if (pmd_swp_soft_dirty(*pmd))
+ newpmd = pmd_swp_mksoft_dirty(newpmd);
++ if (pmd_swp_uffd_wp(*pmd))
++ newpmd = pmd_swp_mkuffd_wp(newpmd);
+ set_pmd_at(mm, addr, pmd, newpmd);
+ }
+ goto unlock;
+@@ -2995,6 +2994,8 @@ void remove_migration_pmd(struct page_vm
+ pmde = pmd_mksoft_dirty(pmde);
+ if (is_write_migration_entry(entry))
+ pmde = maybe_pmd_mkwrite(pmde, vma);
++ if (pmd_swp_uffd_wp(*pvmw->pmd))
++ pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
+
+ flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
+ if (PageAnon(new))
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -696,10 +696,10 @@ out:
+
+ static unsigned long
+ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+- pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
+- unsigned long addr, int *rss)
++ pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
++ struct vm_area_struct *src_vma, unsigned long addr, int *rss)
+ {
+- unsigned long vm_flags = vma->vm_flags;
++ unsigned long vm_flags = dst_vma->vm_flags;
+ pte_t pte = *src_pte;
+ struct page *page;
+ swp_entry_t entry = pte_to_swp_entry(pte);
+@@ -768,6 +768,8 @@ copy_nonpresent_pte(struct mm_struct *ds
+ set_pte_at(src_mm, addr, src_pte, pte);
+ }
+ }
++ if (!userfaultfd_wp(dst_vma))
++ pte = pte_swp_clear_uffd_wp(pte);
+ set_pte_at(dst_mm, addr, dst_pte, pte);
+ return 0;
+ }
+@@ -839,6 +841,9 @@ copy_present_page(struct vm_area_struct
+ /* All done, just insert the new page copy in the child */
+ pte = mk_pte(new_page, dst_vma->vm_page_prot);
+ pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
++ if (userfaultfd_pte_wp(dst_vma, *src_pte))
++ /* Uffd-wp needs to be delivered to dest pte as well */
++ pte = pte_wrprotect(pte_mkuffd_wp(pte));
+ set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
+ return 0;
+ }
+@@ -888,12 +893,7 @@ copy_present_pte(struct vm_area_struct *
+ pte = pte_mkclean(pte);
+ pte = pte_mkold(pte);
+
+- /*
+- * Make sure the _PAGE_UFFD_WP bit is cleared if the new VMA
+- * does not have the VM_UFFD_WP, which means that the uffd
+- * fork event is not enabled.
+- */
+- if (!(vm_flags & VM_UFFD_WP))
++ if (!userfaultfd_wp(dst_vma))
+ pte = pte_clear_uffd_wp(pte);
+
+ set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
+@@ -968,7 +968,8 @@ again:
+ if (unlikely(!pte_present(*src_pte))) {
+ entry.val = copy_nonpresent_pte(dst_mm, src_mm,
+ dst_pte, src_pte,
+- src_vma, addr, rss);
++ dst_vma, src_vma,
++ addr, rss);
+ if (entry.val)
+ break;
+ progress += 8;
+@@ -1045,8 +1046,8 @@ copy_pmd_range(struct vm_area_struct *ds
+ || pmd_devmap(*src_pmd)) {
+ int err;
+ VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
+- err = copy_huge_pmd(dst_mm, src_mm,
+- dst_pmd, src_pmd, addr, src_vma);
++ err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
++ addr, dst_vma, src_vma);
+ if (err == -ENOMEM)
+ return -ENOMEM;
+ if (!err)
firmware-turris-mox-rwtm-add-marvell-armada-3700-rwt.patch
arm64-dts-marvell-armada-37xx-move-firmware-node-to-.patch
revert-swap-fix-do_swap_page-race-with-swapoff.patch
+f2fs-show-casefolding-support-only-when-supported.patch
+mm-thp-simplify-copying-of-huge-zero-page-pmd-when-fork.patch
+mm-userfaultfd-fix-uffd-wp-special-cases-for-fork.patch
+mm-page_alloc-fix-memory-map-initialization-for-descending-nodes.patch
+usb-cdns3-enable-tdl_chk-only-for-out-ep.patch
revert-mm-shmem-fix-shmem_swapin-race-with-swapoff.patch
--- /dev/null
+From d6eef886903c4bb5af41b9a31d4ba11dc7a6f8e8 Mon Sep 17 00:00:00 2001
+From: Sanket Parmar <sparmar@cadence.com>
+Date: Mon, 17 May 2021 17:05:12 +0200
+Subject: usb: cdns3: Enable TDL_CHK only for OUT ep
+
+From: Sanket Parmar <sparmar@cadence.com>
+
+commit d6eef886903c4bb5af41b9a31d4ba11dc7a6f8e8 upstream.
+
+A ZLP gets stuck if the TDL_CHK bit is set and TDL_FROM_TRB is used
+as the TDL source for IN endpoints. To fix it, TDL_CHK is now only
+enabled for OUT endpoints.
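+
+Concretely, the endpoint configuration check becomes (condensed from the
+hunks below; !priv_ep->dir denotes an OUT endpoint):
+
+	if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
+		ep_cfg |= EP_CFG_TDL_CHK;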
+
+Fixes: 7733f6c32e36 ("usb: cdns3: Add Cadence USB3 DRD Driver")
+Reported-by: Aswath Govindraju <a-govindraju@ti.com>
+Signed-off-by: Sanket Parmar <sparmar@cadence.com>
+Link: https://lore.kernel.org/r/1621263912-13175-1-git-send-email-sparmar@cadence.com
+Signed-off-by: Peter Chen <peter.chen@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/cdns3/gadget.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+--- a/drivers/usb/cdns3/gadget.c
++++ b/drivers/usb/cdns3/gadget.c
+@@ -2006,7 +2006,7 @@ static void cdns3_configure_dmult(struct
+ else
+ mask = BIT(priv_ep->num);
+
+- if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
++ if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
+ cdns3_set_register_bit(&regs->tdl_from_trb, mask);
+ cdns3_set_register_bit(&regs->tdl_beh, mask);
+ cdns3_set_register_bit(&regs->tdl_beh2, mask);
+@@ -2045,15 +2045,13 @@ int cdns3_ep_config(struct cdns3_endpoin
+ case USB_ENDPOINT_XFER_INT:
+ ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
+
+- if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
+- priv_dev->dev_ver > DEV_VER_V2)
++ if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
+ ep_cfg |= EP_CFG_TDL_CHK;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
+
+- if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
+- priv_dev->dev_ver > DEV_VER_V2)
++ if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
+ ep_cfg |= EP_CFG_TDL_CHK;
+ break;
+ default: