From: Greg Kroah-Hartman
Date: Mon, 23 Dec 2024 12:52:51 +0000 (+0100)
Subject: 6.12-stable patches
X-Git-Tag: v6.1.122~6
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=73e0702d8cddc4639df54e3f4c54d13b5b57f1f6;p=thirdparty%2Fkernel%2Fstable-queue.git

6.12-stable patches

added patches:
	epoll-add-synchronous-wakeup-support-for-ep_poll_callback.patch
	mm-convert-partially_mapped-set-clear-operations-to-be-atomic.patch
---

diff --git a/queue-6.12/epoll-add-synchronous-wakeup-support-for-ep_poll_callback.patch b/queue-6.12/epoll-add-synchronous-wakeup-support-for-ep_poll_callback.patch
new file mode 100644
index 00000000000..29eeb5fa84b
--- /dev/null
+++ b/queue-6.12/epoll-add-synchronous-wakeup-support-for-ep_poll_callback.patch
@@ -0,0 +1,55 @@
+From 900bbaae67e980945dec74d36f8afe0de7556d5a Mon Sep 17 00:00:00 2001
+From: Xuewen Yan
+Date: Fri, 26 Apr 2024 16:05:48 +0800
+Subject: epoll: Add synchronous wakeup support for ep_poll_callback
+
+From: Xuewen Yan
+
+commit 900bbaae67e980945dec74d36f8afe0de7556d5a upstream.
+
+Currently, epoll only uses the wake_up() interface to wake up
+tasks. However, some epoll users, such as the Android binder
+driver, want to use the synchronous wakeup flag to hint the
+scheduler.
+So add a wake_up_sync() define, and use wake_up_sync() in
+ep_poll_callback() when sync is true.
+
+Co-developed-by: Jing Xia
+Signed-off-by: Jing Xia
+Signed-off-by: Xuewen Yan
+Link: https://lore.kernel.org/r/20240426080548.8203-1-xuewen.yan@unisoc.com
+Tested-by: Brian Geffon
+Reviewed-by: Brian Geffon
+Reported-by: Benoit Lize
+Signed-off-by: Christian Brauner
+Cc: Brian Geffon
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/eventpoll.c       |    5 ++++-
+ include/linux/wait.h |    1 +
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1373,7 +1373,10 @@ static int ep_poll_callback(wait_queue_e
+ 				break;
+ 			}
+ 		}
+-		wake_up(&ep->wq);
++		if (sync)
++			wake_up_sync(&ep->wq);
++		else
++			wake_up(&ep->wq);
+ 	}
+ 	if (waitqueue_active(&ep->poll_wait))
+ 		pwake++;
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -221,6 +221,7 @@ void __wake_up_pollfree(struct wait_queu
+ #define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
+ #define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
+ #define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)
++#define wake_up_sync(x)			__wake_up_sync(x, TASK_NORMAL)
+
+ #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
+ #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
diff --git a/queue-6.12/mm-convert-partially_mapped-set-clear-operations-to-be-atomic.patch b/queue-6.12/mm-convert-partially_mapped-set-clear-operations-to-be-atomic.patch
new file mode 100644
index 00000000000..54f78157ace
--- /dev/null
+++ b/queue-6.12/mm-convert-partially_mapped-set-clear-operations-to-be-atomic.patch
@@ -0,0 +1,101 @@
+From 42b2eb69835b0fda797f70eb5b4fc213dbe3a7ea Mon Sep 17 00:00:00 2001
+From: Usama Arif
+Date: Thu, 12 Dec 2024 18:33:51 +0000
+Subject: mm: convert partially_mapped set/clear operations to be atomic
+
+From: Usama Arif
+
+commit 42b2eb69835b0fda797f70eb5b4fc213dbe3a7ea upstream.
+
+Other page flags in the 2nd page, like PG_hwpoison and PG_anon_exclusive,
+can get modified concurrently. Changes to other page flags might be lost
+if they happen at the same time as non-atomic partially_mapped
+operations. Hence, make partially_mapped operations atomic.
+
+Link: https://lkml.kernel.org/r/20241212183351.1345389-1-usamaarif642@gmail.com
+Fixes: 8422acdc97ed ("mm: introduce a pageflag for partially mapped folios")
+Reported-by: David Hildenbrand
+Link: https://lore.kernel.org/all/e53b04ad-1827-43a2-a1ab-864c7efecf6e@redhat.com/
+Signed-off-by: Usama Arif
+Acked-by: David Hildenbrand
+Acked-by: Johannes Weiner
+Acked-by: Roman Gushchin
+Cc: Barry Song
+Cc: Domenico Cerasuolo
+Cc: Jonathan Corbet
+Cc: Matthew Wilcox
+Cc: Mike Rapoport (Microsoft)
+Cc: Nico Pache
+Cc: Rik van Riel
+Cc: Ryan Roberts
+Cc: Shakeel Butt
+Cc: Yu Zhao
+Cc:
+Signed-off-by: Andrew Morton
+Signed-off-by: Greg Kroah-Hartman
+---
+ include/linux/page-flags.h |   12 ++----------
+ mm/huge_memory.c           |    8 ++++----
+ 2 files changed, 6 insertions(+), 14 deletions(-)
+
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -860,18 +860,10 @@ static inline void ClearPageCompound(str
+ 	ClearPageHead(page);
+ }
+ FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
+-FOLIO_TEST_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
+-/*
+- * PG_partially_mapped is protected by deferred_split split_queue_lock,
+- * so its safe to use non-atomic set/clear.
+- */
+-__FOLIO_SET_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
+-__FOLIO_CLEAR_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
++FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
+ #else
+ FOLIO_FLAG_FALSE(large_rmappable)
+-FOLIO_TEST_FLAG_FALSE(partially_mapped)
+-__FOLIO_SET_FLAG_NOOP(partially_mapped)
+-__FOLIO_CLEAR_FLAG_NOOP(partially_mapped)
++FOLIO_FLAG_FALSE(partially_mapped)
+ #endif
+
+ #define PG_head_mask ((1UL << PG_head))
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -3503,7 +3503,7 @@ int split_huge_page_to_list_to_order(str
+ 			!list_empty(&folio->_deferred_list)) {
+ 		ds_queue->split_queue_len--;
+ 		if (folio_test_partially_mapped(folio)) {
+-			__folio_clear_partially_mapped(folio);
++			folio_clear_partially_mapped(folio);
+ 			mod_mthp_stat(folio_order(folio),
+ 				      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+ 		}
+@@ -3615,7 +3615,7 @@ bool __folio_unqueue_deferred_split(stru
+ 	if (!list_empty(&folio->_deferred_list)) {
+ 		ds_queue->split_queue_len--;
+ 		if (folio_test_partially_mapped(folio)) {
+-			__folio_clear_partially_mapped(folio);
++			folio_clear_partially_mapped(folio);
+ 			mod_mthp_stat(folio_order(folio),
+ 				      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+ 		}
+@@ -3659,7 +3659,7 @@ void deferred_split_folio(struct folio *
+ 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+ 	if (partially_mapped) {
+ 		if (!folio_test_partially_mapped(folio)) {
+-			__folio_set_partially_mapped(folio);
++			folio_set_partially_mapped(folio);
+ 			if (folio_test_pmd_mappable(folio))
+ 				count_vm_event(THP_DEFERRED_SPLIT_PAGE);
+ 			count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
+@@ -3752,7 +3752,7 @@ static unsigned long deferred_split_scan
+ 		} else {
+ 			/* We lost race with folio_put() */
+ 			if (folio_test_partially_mapped(folio)) {
+-				__folio_clear_partially_mapped(folio);
++				folio_clear_partially_mapped(folio);
+ 				mod_mthp_stat(folio_order(folio),
+ 					      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+ 			}
diff --git a/queue-6.12/series b/queue-6.12/series
index db4f3cf19bb..e3e816ea1ef 100644
--- a/queue-6.12/series
+++ b/queue-6.12/series
@@ -156,3 +156,5 @@ ceph-fix-memory-leak-in-ceph_direct_read_write.patch
 mm-use-aligned-address-in-clear_gigantic_page.patch
 mm-use-aligned-address-in-copy_user_gigantic_page.patch
 mm-shmem-fix-shmemhugepages-at-swapout.patch
+mm-convert-partially_mapped-set-clear-operations-to-be-atomic.patch
+epoll-add-synchronous-wakeup-support-for-ep_poll_callback.patch
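
For the epoll patch, it may help to see where wake_up_sync() sits in the
existing wait.h macro family: wake_up() expands to
__wake_up(x, TASK_NORMAL, 1, NULL), and wake_up_interruptible_sync()
already pairs __wake_up_sync() with TASK_INTERRUPTIBLE; the patch adds
the TASK_NORMAL counterpart. __wake_up_sync() passes the WF_SYNC hint to
the scheduler: the waker is about to sleep, so the woken task may be
placed on the current CPU instead of kicking another one. A minimal
sketch of a caller, assuming the patch above is applied; my_wq, my_done
and my_complete() are hypothetical names, not taken from the patch:

	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
	static bool my_done;

	/* Hypothetical completion path, mirroring ep_poll_callback():
	 * when the event source knows the waker is about to block, the
	 * synchronous variant hints the scheduler via WF_SYNC. */
	static void my_complete(bool sync)
	{
		my_done = true;
		if (sync)
			wake_up_sync(&my_wq);	/* synchronous wakeup hint */
		else
			wake_up(&my_wq);	/* ordinary wakeup */
	}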
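The mm patch is worth unpacking: __FOLIO_SET_FLAG()/__FOLIO_CLEAR_FLAG()
generate non-atomic __set_bit()/__clear_bit() read-modify-writes, while
FOLIO_FLAG() generates atomic set_bit()/clear_bit(), and PG_hwpoison and
PG_anon_exclusive live in the same flags word, so a racing plain
read-modify-write can silently undo an atomic update. A self-contained
userspace sketch of that lost-update race (deliberately racy, hence
formally undefined behaviour; FLAG_A/FLAG_B only stand in for the page
flags, and the loss count will vary by machine and run):

	/* Build: cc -O2 -pthread flag-race.c && ./a.out */
	#include <pthread.h>
	#include <stdio.h>

	#define FLAG_A (1UL << 0)	/* stands in for PG_partially_mapped */
	#define FLAG_B (1UL << 1)	/* stands in for PG_hwpoison etc. */
	#define TRIALS 200000

	static volatile unsigned long flags;	/* one word, like page->flags */
	static pthread_barrier_t sync_point;

	/* Non-atomic read-modify-write, like __set_bit()/__clear_bit(). */
	static void *nonatomic_toggler(void *arg)
	{
		for (int i = 0; i < TRIALS; i++) {
			pthread_barrier_wait(&sync_point);	/* trial start */
			flags |= FLAG_A;	/* may write back a stale word... */
			flags &= ~FLAG_A;	/* ...erasing FLAG_B */
			pthread_barrier_wait(&sync_point);	/* trial end */
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t a;
		long lost = 0;

		pthread_barrier_init(&sync_point, NULL, 2);
		pthread_create(&a, NULL, nonatomic_toggler, NULL);
		for (int i = 0; i < TRIALS; i++) {
			flags = 0;				/* reset between trials */
			pthread_barrier_wait(&sync_point);	/* trial start */
			/* Atomic OR, like set_bit(); sets FLAG_B exactly once. */
			__atomic_fetch_or(&flags, FLAG_B, __ATOMIC_RELAXED);
			pthread_barrier_wait(&sync_point);	/* trial end */
			if (!(flags & FLAG_B))	/* the atomic update was lost */
				lost++;
		}
		pthread_join(a, NULL);
		pthread_barrier_destroy(&sync_point);
		printf("FLAG_B lost in %ld of %d trials\n", lost, TRIALS);
		return 0;
	}

With the patch applied, both sides of the kernel race use atomic bit
operations on folio->flags, so the equivalent of the "lost" branch above
can no longer happen.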