From: Greg Kroah-Hartman
Date: Sun, 6 Mar 2022 10:47:56 +0000 (+0100)
Subject: 4.19-stable patches
X-Git-Tag: v4.9.305~26
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=01019bfc72fb2848d157c4008b6280962ba64eb0;p=thirdparty%2Fkernel%2Fstable-queue.git

4.19-stable patches

added patches:
memfd-fix-f_seal_write-after-shmem-huge-page-allocated.patch
---

diff --git a/queue-4.19/memfd-fix-f_seal_write-after-shmem-huge-page-allocated.patch b/queue-4.19/memfd-fix-f_seal_write-after-shmem-huge-page-allocated.patch
new file mode 100644
index 00000000000..16725f95db7
--- /dev/null
+++ b/queue-4.19/memfd-fix-f_seal_write-after-shmem-huge-page-allocated.patch
@@ -0,0 +1,119 @@
+From f2b277c4d1c63a85127e8aa2588e9cc3bd21cb99 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins
+Date: Fri, 4 Mar 2022 20:29:01 -0800
+Subject: memfd: fix F_SEAL_WRITE after shmem huge page allocated
+
+From: Hugh Dickins
+
+commit f2b277c4d1c63a85127e8aa2588e9cc3bd21cb99 upstream.
+
+Wangyong reports: after enabling tmpfs filesystem to support transparent
+hugepage with the following command:
+
+ echo always > /sys/kernel/mm/transparent_hugepage/shmem_enabled
+
+the docker program tries to add F_SEAL_WRITE through the following
+command, but it fails unexpectedly with errno EBUSY:
+
+ fcntl(5, F_ADD_SEALS, F_SEAL_WRITE) = -1.
+
+That is because memfd_tag_pins() and memfd_wait_for_pins() were never
+updated for shmem huge pages: checking page_mapcount() against
+page_count() is hopeless on THP subpages - they need to check
+total_mapcount() against page_count() on THP heads only.
+
+Make memfd_tag_pins() (compared > 1) as strict as memfd_wait_for_pins()
+(compared != 1): either can be justified, but given the non-atomic
+total_mapcount() calculation, it is better now to be strict. Bear in
+mind that total_mapcount() itself scans all of the THP subpages, when
+choosing to take an XA_CHECK_SCHED latency break.
+
+Also fix the unlikely xa_is_value() case in memfd_wait_for_pins(): if a
+page has been swapped out since memfd_tag_pins(), then its refcount must
+have fallen, and so it can safely be untagged.
+
+Link: https://lkml.kernel.org/r/a4f79248-df75-2c8c-3df-ba3317ccb5da@google.com
+Signed-off-by: Hugh Dickins
+Reported-by: Zeal Robot
+Reported-by: wangyong
+Cc: Mike Kravetz
+Cc: Matthew Wilcox (Oracle)
+Cc: CGEL ZTE
+Cc: Kirill A. Shutemov
+Cc: Song Liu
+Cc: Yang Yang
+Cc:
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+---
+ mm/memfd.c | 30 ++++++++++++++++++++++--------
+ 1 file changed, 22 insertions(+), 8 deletions(-)
+
+--- a/mm/memfd.c
++++ b/mm/memfd.c
+@@ -34,26 +34,35 @@ static void memfd_tag_pins(struct addres
+ void __rcu **slot;
+ pgoff_t start;
+ struct page *page;
+- unsigned int tagged = 0;
++ int latency = 0;
++ int cache_count;
+
+ lru_add_drain();
+ start = 0;
+
+ xa_lock_irq(&mapping->i_pages);
+ radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
++ cache_count = 1;
+ page = radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
+- if (!page || radix_tree_exception(page)) {
++ if (!page || radix_tree_exception(page) || PageTail(page)) {
+ if (radix_tree_deref_retry(page)) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
+- } else if (page_count(page) - page_mapcount(page) > 1) {
+- radix_tree_tag_set(&mapping->i_pages, iter.index,
+- MEMFD_TAG_PINNED);
++ } else {
++ if (PageTransHuge(page) && !PageHuge(page))
++ cache_count = HPAGE_PMD_NR;
++ if (cache_count !=
++ page_count(page) - total_mapcount(page)) {
++ radix_tree_tag_set(&mapping->i_pages,
++ iter.index, MEMFD_TAG_PINNED);
++ }
+ }
+
+- if (++tagged % 1024)
++ latency += cache_count;
++ if (latency < 1024)
+ continue;
++ latency = 0;
+
+ slot = radix_tree_iter_resume(slot, &iter);
+ xa_unlock_irq(&mapping->i_pages);
+@@ -79,6 +88,7 @@ static int memfd_wait_for_pins(struct ad
+ pgoff_t start;
+ struct page *page;
+ int error, scan;
++ int cache_count;
+
+ memfd_tag_pins(mapping);
+
+@@ -107,8 +117,12 @@ static int memfd_wait_for_pins(struct ad
+ page = NULL;
+ }
+
+- if (page &&
+- page_count(page) - page_mapcount(page) != 1) {
++ cache_count = 1;
++ if (page && PageTransHuge(page) && !PageHuge(page))
++ cache_count = HPAGE_PMD_NR;
++
++ if (page && cache_count !=
++ page_count(page) - total_mapcount(page)) {
+ if (scan < LAST_SCAN)
+ goto continue_resched;
+
diff --git a/queue-4.19/series b/queue-4.19/series
index 12dde660204..d84cd66542b 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -45,3 +45,4 @@ nl80211-handle-nla_memdup-failures-in-handle_nan_fil.patch
 input-elan_i2c-move-regulator_able-out-of-elan_able_power.patch
 input-elan_i2c-fix-regulator-enable-count-imbalance-after-suspend-resume.patch
 hid-add-mapping-for-key_all_applications.patch
+memfd-fix-f_seal_write-after-shmem-huge-page-allocated.patch
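
Reviewer note, not part of the patch: the commit message only quotes the failing strace fragment, fcntl(5, F_ADD_SEALS, F_SEAL_WRITE) = -1 with errno EBUSY. The stand-alone program below is a minimal sketch of that sequence for exercising the backport; the memfd name, the 4 MB size, and the mmap()/memset() step used to get shmem huge pages allocated are illustrative assumptions, not details taken from the patch or the original report. With /sys/kernel/mm/transparent_hugepage/shmem_enabled set to "always", an unfixed 4.19 kernel can fail the final fcntl() with EBUSY even though nothing still maps or pins the pages; with the fix applied the seal should be added.

/*
 * seal-test.c - hedged reproducer sketch for the F_SEAL_WRITE/EBUSY report.
 * Assumes /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always".
 * Build: gcc -O2 -o seal-test seal-test.c (memfd_create() needs glibc >= 2.27).
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t size = 4UL << 20;	/* big enough for PMD-sized shmem pages (assumed) */
	void *p;
	int fd;

	fd = memfd_create("seal-test", MFD_CLOEXEC | MFD_ALLOW_SEALING);
	if (fd < 0 || ftruncate(fd, size) < 0) {
		perror("memfd_create/ftruncate");
		return 1;
	}

	/* Fault the pages in so shmem may back them with huge pages. */
	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0x5a, size);
	munmap(p, size);	/* no mappings or extra pins remain */

	/* This is the step the report says returned EBUSY. */
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE) < 0) {
		perror("F_ADD_SEALS(F_SEAL_WRITE)");
		return 1;
	}
	printf("F_SEAL_WRITE applied\n");
	close(fd);
	return 0;
}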