From: Greg Kroah-Hartman
Date: Mon, 29 Jul 2024 07:44:48 +0000 (+0200)
Subject: 6.6-stable patches
X-Git-Tag: v6.1.103~97
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=ae586d69b428d74d9ee8472a8fc5a80df87739e0;p=thirdparty%2Fkernel%2Fstable-queue.git

6.6-stable patches

added patches:
	hugetlb-force-allocating-surplus-hugepages-on-mempolicy-allowed-nodes.patch
	landlock-don-t-lose-track-of-restrictions-on-cred_transfer.patch
	mm-hugetlb-fix-possible-recursive-locking-detected-warning.patch
	mm-mglru-fix-div-by-zero-in-vmpressure_calc_level.patch
	mm-mglru-fix-overshooting-shrinker-memory.patch
	mm-mmap_lock-replace-get_memcg_path_buf-with-on-stack-buffer.patch
	x86-efistub-avoid-returning-efi_success-on-error.patch
	x86-efistub-revert-to-heap-allocated-boot_params-for-pe-entrypoint.patch
---

diff --git a/queue-6.6/hugetlb-force-allocating-surplus-hugepages-on-mempolicy-allowed-nodes.patch b/queue-6.6/hugetlb-force-allocating-surplus-hugepages-on-mempolicy-allowed-nodes.patch
new file mode 100644
index 00000000000..7915e83cdfd
--- /dev/null
+++ b/queue-6.6/hugetlb-force-allocating-surplus-hugepages-on-mempolicy-allowed-nodes.patch
@@ -0,0 +1,131 @@
+From 003af997c8a945493859dd1a2d015cc9387ff27a Mon Sep 17 00:00:00 2001
+From: Aristeu Rozanski
+Date: Fri, 21 Jun 2024 15:00:50 -0400
+Subject: hugetlb: force allocating surplus hugepages on mempolicy allowed nodes
+
+From: Aristeu Rozanski
+
+commit 003af997c8a945493859dd1a2d015cc9387ff27a upstream.
+
+When trying to allocate a hugepage with no reserved ones free, the
+allocation may still be allowed if a number of overcommit hugepages was
+configured (using /proc/sys/vm/nr_overcommit_hugepages) and that number
+wasn't reached.  This allows extra hugepages to be allocated
+dynamically, if there are resources for them.  Some sysadmins even
+prefer not reserving any hugepages and setting a big number of
+overcommit hugepages.
+
+But while attempting to allocate overcommit hugepages on a multi-node
+system (either NUMA or mempolicy/cpuset), said allocations might
+randomly fail even when there are resources available for the
+allocation.
+
+This happens because allowed_mems_nr() only accounts for the number of
+free hugepages on the nodes the current process belongs to, while the
+surplus hugepage allocation may be satisfied by any node.  In case one
+or more of the requested surplus hugepages are allocated on a different
+node, the whole allocation will fail due to allowed_mems_nr() returning
+a lower value.
+
+So allocate surplus hugepages on one of the nodes the current process
+belongs to.
+
+An easy way to reproduce this issue is to use a system with 2+ NUMA
+nodes:
+
+ # echo 0 >/proc/sys/vm/nr_hugepages
+ # echo 1 >/proc/sys/vm/nr_overcommit_hugepages
+ # numactl -m0 ./tools/testing/selftests/mm/map_hugetlb 2
+
+Repeated runs of the map_hugetlb test application will eventually fail
+when the hugepage ends up allocated on a different node.
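+
+For context, a simplified sketch of the check that trips (paraphrasing
+the hugetlb_acct_memory() flow; not the exact upstream code):
+
+	/* sketch: surplus pages are gathered first, then re-checked */
+	static int hugetlb_acct_memory_sketch(struct hstate *h, long delta)
+	{
+		/* pre-patch: may allocate the surplus pages on any node */
+		if (gather_surplus_pages(h, delta) < 0)
+			return -ENOMEM;
+		/* but this only counts nodes the task is allowed to use */
+		if (delta > allowed_mems_nr(h)) {
+			return_unused_surplus_pages(h, delta);
+			return -ENOMEM;
+		}
+		return 0;
+	}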
+ +[aris@ruivo.org: v2] + Link: https://lkml.kernel.org/r/20240701212343.GG844599@cathedrallabs.org +Link: https://lkml.kernel.org/r/20240621190050.mhxwb65zn37doegp@redhat.com +Signed-off-by: Aristeu Rozanski +Cc: Muchun Song +Cc: Aristeu Rozanski +Cc: David Hildenbrand +Cc: Vishal Moola +Cc: +Signed-off-by: Andrew Morton +Signed-off-by: Greg Kroah-Hartman +--- + mm/hugetlb.c | 47 ++++++++++++++++++++++++++++------------------- + 1 file changed, 28 insertions(+), 19 deletions(-) + +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -2518,6 +2518,23 @@ struct folio *alloc_hugetlb_folio_vma(st + return folio; + } + ++static nodemask_t *policy_mbind_nodemask(gfp_t gfp) ++{ ++#ifdef CONFIG_NUMA ++ struct mempolicy *mpol = get_task_policy(current); ++ ++ /* ++ * Only enforce MPOL_BIND policy which overlaps with cpuset policy ++ * (from policy_nodemask) specifically for hugetlb case ++ */ ++ if (mpol->mode == MPOL_BIND && ++ (apply_policy_zone(mpol, gfp_zone(gfp)) && ++ cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) ++ return &mpol->nodes; ++#endif ++ return NULL; ++} ++ + /* + * Increase the hugetlb pool such that it can accommodate a reservation + * of size 'delta'. +@@ -2531,6 +2548,8 @@ static int gather_surplus_pages(struct h + long i; + long needed, allocated; + bool alloc_ok = true; ++ int node; ++ nodemask_t *mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h)); + + lockdep_assert_held(&hugetlb_lock); + needed = (h->resv_huge_pages + delta) - h->free_huge_pages; +@@ -2545,8 +2564,15 @@ static int gather_surplus_pages(struct h + retry: + spin_unlock_irq(&hugetlb_lock); + for (i = 0; i < needed; i++) { +- folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), +- NUMA_NO_NODE, NULL); ++ folio = NULL; ++ for_each_node_mask(node, cpuset_current_mems_allowed) { ++ if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) { ++ folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), ++ node, NULL); ++ if (folio) ++ break; ++ } ++ } + if (!folio) { + alloc_ok = false; + break; +@@ -4531,23 +4557,6 @@ static int __init default_hugepagesz_set + } + __setup("default_hugepagesz=", default_hugepagesz_setup); + +-static nodemask_t *policy_mbind_nodemask(gfp_t gfp) +-{ +-#ifdef CONFIG_NUMA +- struct mempolicy *mpol = get_task_policy(current); +- +- /* +- * Only enforce MPOL_BIND policy which overlaps with cpuset policy +- * (from policy_nodemask) specifically for hugetlb case +- */ +- if (mpol->mode == MPOL_BIND && +- (apply_policy_zone(mpol, gfp_zone(gfp)) && +- cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) +- return &mpol->nodes; +-#endif +- return NULL; +-} +- + static unsigned int allowed_mems_nr(struct hstate *h) + { + int node; diff --git a/queue-6.6/landlock-don-t-lose-track-of-restrictions-on-cred_transfer.patch b/queue-6.6/landlock-don-t-lose-track-of-restrictions-on-cred_transfer.patch new file mode 100644 index 00000000000..76236deda78 --- /dev/null +++ b/queue-6.6/landlock-don-t-lose-track-of-restrictions-on-cred_transfer.patch @@ -0,0 +1,72 @@ +From 39705a6c29f8a2b93cf5b99528a55366c50014d1 Mon Sep 17 00:00:00 2001 +From: Jann Horn +Date: Wed, 24 Jul 2024 14:49:01 +0200 +Subject: landlock: Don't lose track of restrictions on cred_transfer +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Jann Horn + +commit 39705a6c29f8a2b93cf5b99528a55366c50014d1 upstream. 
+ +When a process' cred struct is replaced, this _almost_ always invokes +the cred_prepare LSM hook; but in one special case (when +KEYCTL_SESSION_TO_PARENT updates the parent's credentials), the +cred_transfer LSM hook is used instead. Landlock only implements the +cred_prepare hook, not cred_transfer, so KEYCTL_SESSION_TO_PARENT causes +all information on Landlock restrictions to be lost. + +This basically means that a process with the ability to use the fork() +and keyctl() syscalls can get rid of all Landlock restrictions on +itself. + +Fix it by adding a cred_transfer hook that does the same thing as the +existing cred_prepare hook. (Implemented by having hook_cred_prepare() +call hook_cred_transfer() so that the two functions are less likely to +accidentally diverge in the future.) + +Cc: stable@kernel.org +Fixes: 385975dca53e ("landlock: Set up the security framework and manage credentials") +Signed-off-by: Jann Horn +Link: https://lore.kernel.org/r/20240724-landlock-houdini-fix-v1-1-df89a4560ca3@google.com +Signed-off-by: Mickaël Salaün +Signed-off-by: Greg Kroah-Hartman +--- + security/landlock/cred.c | 11 +++++++++-- + 1 file changed, 9 insertions(+), 2 deletions(-) + +--- a/security/landlock/cred.c ++++ b/security/landlock/cred.c +@@ -14,8 +14,8 @@ + #include "ruleset.h" + #include "setup.h" + +-static int hook_cred_prepare(struct cred *const new, +- const struct cred *const old, const gfp_t gfp) ++static void hook_cred_transfer(struct cred *const new, ++ const struct cred *const old) + { + struct landlock_ruleset *const old_dom = landlock_cred(old)->domain; + +@@ -23,6 +23,12 @@ static int hook_cred_prepare(struct cred + landlock_get_ruleset(old_dom); + landlock_cred(new)->domain = old_dom; + } ++} ++ ++static int hook_cred_prepare(struct cred *const new, ++ const struct cred *const old, const gfp_t gfp) ++{ ++ hook_cred_transfer(new, old); + return 0; + } + +@@ -36,6 +42,7 @@ static void hook_cred_free(struct cred * + + static struct security_hook_list landlock_hooks[] __ro_after_init = { + LSM_HOOK_INIT(cred_prepare, hook_cred_prepare), ++ LSM_HOOK_INIT(cred_transfer, hook_cred_transfer), + LSM_HOOK_INIT(cred_free, hook_cred_free), + }; + diff --git a/queue-6.6/mm-hugetlb-fix-possible-recursive-locking-detected-warning.patch b/queue-6.6/mm-hugetlb-fix-possible-recursive-locking-detected-warning.patch new file mode 100644 index 00000000000..529457fd57c --- /dev/null +++ b/queue-6.6/mm-hugetlb-fix-possible-recursive-locking-detected-warning.patch @@ -0,0 +1,100 @@ +From 667574e873b5f77a220b2a93329689f36fb56d5d Mon Sep 17 00:00:00 2001 +From: Miaohe Lin +Date: Fri, 12 Jul 2024 11:13:14 +0800 +Subject: mm/hugetlb: fix possible recursive locking detected warning + +From: Miaohe Lin + +commit 667574e873b5f77a220b2a93329689f36fb56d5d upstream. 
+
+When trying to demote 1G hugetlb folios, a lockdep warning is observed:
+
+============================================
+WARNING: possible recursive locking detected
+6.10.0-rc6-00452-ga4d0275fa660-dirty #79 Not tainted
+--------------------------------------------
+bash/710 is trying to acquire lock:
+ffffffff8f0a7850 (&h->resize_lock){+.+.}-{3:3}, at: demote_store+0x244/0x460
+
+but task is already holding lock:
+ffffffff8f0a6f48 (&h->resize_lock){+.+.}-{3:3}, at: demote_store+0xae/0x460
+
+other info that might help us debug this:
+ Possible unsafe locking scenario:
+
+       CPU0
+       ----
+  lock(&h->resize_lock);
+  lock(&h->resize_lock);
+
+ *** DEADLOCK ***
+
+ May be due to missing lock nesting notation
+
+4 locks held by bash/710:
+ #0: ffff8f118439c3f0 (sb_writers#5){.+.+}-{0:0}, at: ksys_write+0x64/0xe0
+ #1: ffff8f11893b9e88 (&of->mutex#2){+.+.}-{3:3}, at: kernfs_fop_write_iter+0xf8/0x1d0
+ #2: ffff8f1183dc4428 (kn->active#98){.+.+}-{0:0}, at: kernfs_fop_write_iter+0x100/0x1d0
+ #3: ffffffff8f0a6f48 (&h->resize_lock){+.+.}-{3:3}, at: demote_store+0xae/0x460
+
+stack backtrace:
+CPU: 3 PID: 710 Comm: bash Not tainted 6.10.0-rc6-00452-ga4d0275fa660-dirty #79
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x68/0xa0
+ __lock_acquire+0x10f2/0x1ca0
+ lock_acquire+0xbe/0x2d0
+ __mutex_lock+0x6d/0x400
+ demote_store+0x244/0x460
+ kernfs_fop_write_iter+0x12c/0x1d0
+ vfs_write+0x380/0x540
+ ksys_write+0x64/0xe0
+ do_syscall_64+0xb9/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+RIP: 0033:0x7fa61db14887
+RSP: 002b:00007ffc56c48358 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
+RAX: ffffffffffffffda RBX: 0000000000000002 RCX: 00007fa61db14887
+RDX: 0000000000000002 RSI: 000055a030050220 RDI: 0000000000000001
+RBP: 000055a030050220 R08: 00007fa61dbd1460 R09: 000000007fffffff
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000002
+R13: 00007fa61dc1b780 R14: 00007fa61dc17600 R15: 00007fa61dc16a00
+ </TASK>
+
+Lockdep considers this an AA deadlock because the different resize_lock
+mutexes reside in the same lockdep class, but this is a false positive.
+Place them in distinct classes to avoid these warnings.
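+
+The same pattern works for any family of same-type locks that may nest
+legitimately: embed a struct lock_class_key per object and hand it to
+__mutex_init(), giving each instance its own lockdep class.  A minimal
+sketch (the my_pool type is hypothetical, for illustration only; it
+relies on static storage, as hstates[] has, since keys in dynamically
+allocated memory would need lockdep_register_key()):
+
+	#include <linux/mutex.h>
+
+	struct my_pool {
+		struct mutex lock;
+		struct lock_class_key key;	/* one class per instance */
+	};
+
+	static struct my_pool pools[2];		/* static, like hstates[] */
+
+	static void my_pool_init(struct my_pool *p)
+	{
+		/* plain mutex_init() would put all instances in one class */
+		__mutex_init(&p->lock, "my_pool lock", &p->key);
+	}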
+ +Link: https://lkml.kernel.org/r/20240712031314.2570452-1-linmiaohe@huawei.com +Fixes: 8531fc6f52f5 ("hugetlb: add hugetlb demote page support") +Signed-off-by: Miaohe Lin +Acked-by: Muchun Song +Cc: +Signed-off-by: Andrew Morton +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/hugetlb.h | 1 + + mm/hugetlb.c | 2 +- + 2 files changed, 2 insertions(+), 1 deletion(-) + +--- a/include/linux/hugetlb.h ++++ b/include/linux/hugetlb.h +@@ -713,6 +713,7 @@ HPAGEFLAG(RawHwpUnreliable, raw_hwp_unre + /* Defines one hugetlb page size */ + struct hstate { + struct mutex resize_lock; ++ struct lock_class_key resize_key; + int next_nid_to_alloc; + int next_nid_to_free; + unsigned int order; +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -4334,7 +4334,7 @@ void __init hugetlb_add_hstate(unsigned + BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); + BUG_ON(order == 0); + h = &hstates[hugetlb_max_hstate++]; +- mutex_init(&h->resize_lock); ++ __mutex_init(&h->resize_lock, "resize mutex", &h->resize_key); + h->order = order; + h->mask = ~(huge_page_size(h) - 1); + for (i = 0; i < MAX_NUMNODES; ++i) diff --git a/queue-6.6/mm-mglru-fix-div-by-zero-in-vmpressure_calc_level.patch b/queue-6.6/mm-mglru-fix-div-by-zero-in-vmpressure_calc_level.patch new file mode 100644 index 00000000000..1580ef784a8 --- /dev/null +++ b/queue-6.6/mm-mglru-fix-div-by-zero-in-vmpressure_calc_level.patch @@ -0,0 +1,51 @@ +From 8b671fe1a879923ecfb72dda6caf01460dd885ef Mon Sep 17 00:00:00 2001 +From: Yu Zhao +Date: Thu, 11 Jul 2024 13:19:56 -0600 +Subject: mm/mglru: fix div-by-zero in vmpressure_calc_level() + +From: Yu Zhao + +commit 8b671fe1a879923ecfb72dda6caf01460dd885ef upstream. + +evict_folios() uses a second pass to reclaim folios that have gone through +page writeback and become clean before it finishes the first pass, since +folio_rotate_reclaimable() cannot handle those folios due to the +isolation. + +The second pass tries to avoid potential double counting by deducting +scan_control->nr_scanned. However, this can result in underflow of +nr_scanned, under a condition where shrink_folio_list() does not increment +nr_scanned, i.e., when folio_trylock() fails. + +The underflow can cause the divisor, i.e., scale=scanned+reclaimed in +vmpressure_calc_level(), to become zero, resulting in the following crash: + + [exception RIP: vmpressure_work_fn+101] + process_one_work at ffffffffa3313f2b + +Since scan_control->nr_scanned has no established semantics, the potential +double counting has minimal risks. Therefore, fix the problem by not +deducting scan_control->nr_scanned in evict_folios(). 
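+
+The underflow itself is ordinary unsigned wrap-around.  A user-space toy
+(illustrative only, not kernel code) shows what deducting pages that were
+never added does to such a counter:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		/* folio_trylock() failed, so nr_scanned was never bumped */
+		unsigned long nr_scanned = 0;
+
+		nr_scanned -= 512;	/* wraps instead of going negative */
+		printf("%lu\n", nr_scanned);	/* 18446744073709551104 on 64-bit */
+		return 0;
+	}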
+
+Link: https://lkml.kernel.org/r/20240711191957.939105-1-yuzhao@google.com
+Fixes: 359a5e1416ca ("mm: multi-gen LRU: retry folios written back while isolated")
+Reported-by: Wei Xu
+Signed-off-by: Yu Zhao
+Cc: Alexander Motin
+Cc:
+Signed-off-by: Andrew Morton
+Signed-off-by: Greg Kroah-Hartman
+---
+ mm/vmscan.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -5226,7 +5226,6 @@ retry:
+ 
+ 		/* retry folios that may have missed folio_rotate_reclaimable() */
+ 		list_move(&folio->lru, &clean);
+-		sc->nr_scanned -= folio_nr_pages(folio);
+ 	}
+ 
+ 	spin_lock_irq(&lruvec->lru_lock);
diff --git a/queue-6.6/mm-mglru-fix-overshooting-shrinker-memory.patch b/queue-6.6/mm-mglru-fix-overshooting-shrinker-memory.patch
new file mode 100644
index 00000000000..47f6aabfcae
--- /dev/null
+++ b/queue-6.6/mm-mglru-fix-overshooting-shrinker-memory.patch
@@ -0,0 +1,89 @@
+From 3f74e6bd3b84a8b6bb3cc51609c89e5b9d58eed7 Mon Sep 17 00:00:00 2001
+From: Yu Zhao
+Date: Thu, 11 Jul 2024 13:19:57 -0600
+Subject: mm/mglru: fix overshooting shrinker memory
+
+From: Yu Zhao
+
+commit 3f74e6bd3b84a8b6bb3cc51609c89e5b9d58eed7 upstream.
+
+set_initial_priority() tries to jump-start global reclaim by estimating
+the priority based on cold/hot LRU pages.  The estimation does not
+account for shrinker objects, and it cannot do so because their sizes
+can be in units other than pages.
+
+If shrinker objects are the majority, e.g., on TrueNAS SCALE 24.04.0
+where ZFS ARC can use almost all system memory, set_initial_priority()
+can vastly underestimate how much memory the ARC shrinker can evict and
+assign extremely low values to scan_control->priority, resulting in
+overshoots of shrinker objects.
+
+To reproduce the problem, use TrueNAS SCALE 24.04.0 with 32GB DRAM, a
+test ZFS pool, and the following commands:
+
+  fio --name=mglru.file --numjobs=36 --ioengine=io_uring \
+      --directory=/root/test-zfs-pool/ --size=1024m --buffered=1 \
+      --rw=randread --random_distribution=random \
+      --time_based --runtime=1h &
+
+  for ((i = 0; i < 20; i++))
+  do
+    sleep 120
+    fio --name=mglru.anon --numjobs=16 --ioengine=mmap \
+        --filename=/dev/zero --size=1024m --fadvise_hint=0 \
+        --rw=randrw --random_distribution=random \
+        --time_based --runtime=1m
+  done
+
+To fix the problem:
+1. Cap scan_control->priority at or above DEF_PRIORITY/2, to prevent
+   the jump-start from being overly aggressive.
+2. Account for the progress from mm_account_reclaimed_pages(), to
+   prevent kswapd_shrink_node() from raising the priority
+   unnecessarily.
+
+Link: https://lkml.kernel.org/r/20240711191957.939105-2-yuzhao@google.com
+Fixes: e4dde56cd208 ("mm: multi-gen LRU: per-node lru_gen_folio lists")
+Signed-off-by: Yu Zhao
+Reported-by: Alexander Motin
+Cc: Wei Xu
+Cc:
+Signed-off-by: Andrew Morton
+Signed-off-by: Greg Kroah-Hartman
+---
+ mm/vmscan.c |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -5585,7 +5585,11 @@ static void set_initial_priority(struct
+ 	/* round down reclaimable and round up sc->nr_to_reclaim */
+ 	priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1);
+ 
+-	sc->priority = clamp(priority, 0, DEF_PRIORITY);
++	/*
++	 * The estimation is based on LRU pages only, so cap it to prevent
++	 * overshoots of shrinker objects by large margins.
++	 */
++	sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY);
+ }
+ 
+ static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
+@@ -7350,6 +7354,7 @@ static bool kswapd_shrink_node(pg_data_t
+ {
+ 	struct zone *zone;
+ 	int z;
++	unsigned long nr_reclaimed = sc->nr_reclaimed;
+ 
+ 	/* Reclaim a number of pages proportional to the number of zones */
+ 	sc->nr_to_reclaim = 0;
+@@ -7377,7 +7382,8 @@ static bool kswapd_shrink_node(pg_data_t
+ 	if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
+ 		sc->order = 0;
+ 
+-	return sc->nr_scanned >= sc->nr_to_reclaim;
++	/* account for progress from mm_account_reclaimed_pages() */
++	return max(sc->nr_scanned, sc->nr_reclaimed - nr_reclaimed) >= sc->nr_to_reclaim;
+ }
+ 
+ /* Page allocator PCP high watermark is lowered if reclaim is active. */
diff --git a/queue-6.6/mm-mmap_lock-replace-get_memcg_path_buf-with-on-stack-buffer.patch b/queue-6.6/mm-mmap_lock-replace-get_memcg_path_buf-with-on-stack-buffer.patch
new file mode 100644
index 00000000000..8b9335a1864
--- /dev/null
+++ b/queue-6.6/mm-mmap_lock-replace-get_memcg_path_buf-with-on-stack-buffer.patch
@@ -0,0 +1,262 @@
+From 7d6be67cfdd4a53cea7147313ca13c531e3a470f Mon Sep 17 00:00:00 2001
+From: Tetsuo Handa
+Date: Fri, 21 Jun 2024 10:08:41 +0900
+Subject: mm: mmap_lock: replace get_memcg_path_buf() with on-stack buffer
+
+From: Tetsuo Handa
+
+commit 7d6be67cfdd4a53cea7147313ca13c531e3a470f upstream.
+
+Commit 2b5067a8143e ("mm: mmap_lock: add tracepoints around lock
+acquisition") introduced the TRACE_MMAP_LOCK_EVENT() macro, which uses
+preempt_disable() so that get_mm_memcg_path() can return a percpu
+buffer exclusively used by the normal, softirq, irq and NMI contexts
+respectively.
+
+Commit 832b50725373 ("mm: mmap_lock: use local locks instead of
+disabling preemption") replaced preempt_disable() with
+local_lock(&memcg_paths.lock), based on the argument that
+preempt_disable() has to be avoided because get_mm_memcg_path() might
+sleep if PREEMPT_RT=y.
+
+But syzbot started reporting
+
+  inconsistent {HARDIRQ-ON-W} -> {IN-HARDIRQ-W} usage.
+
+and
+
+  inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
+
+messages, because local_lock() does not disable IRQs.
+
+We could replace local_lock() with local_lock_irqsave() in order to
+suppress these messages.  But this patch instead replaces the percpu
+buffers with an on-stack buffer, since each buffer returned by
+get_memcg_path_buf() is only 256 bytes, which is tolerable to allocate
+from the current thread's kernel stack.
+
+Link: https://lkml.kernel.org/r/ef22d289-eadb-4ed9-863b-fbc922b33d8d@I-love.SAKURA.ne.jp
+Reported-by: syzbot
+Closes: https://syzkaller.appspot.com/bug?extid=40905bca570ae6784745
+Fixes: 832b50725373 ("mm: mmap_lock: use local locks instead of disabling preemption")
+Signed-off-by: Tetsuo Handa
+Reviewed-by: Axel Rasmussen
+Cc: Nicolas Saenz Julienne
+Cc:
+Signed-off-by: Andrew Morton
+Signed-off-by: Greg Kroah-Hartman
+---
+ mm/mmap_lock.c |  175 ++++++---------------------------------------------------
+ 1 file changed, 20 insertions(+), 155 deletions(-)
+
+--- a/mm/mmap_lock.c
++++ b/mm/mmap_lock.c
+@@ -19,14 +19,7 @@ EXPORT_TRACEPOINT_SYMBOL(mmap_lock_relea
+ 
+ #ifdef CONFIG_MEMCG
+ 
+-/*
+- * Our various events all share the same buffer (because we don't want or need
+- * to allocate a set of buffers *per event type*), so we need to protect against
+- * concurrent _reg() and _unreg() calls, and count how many _reg() calls have
+- * been made.
+- */ +-static DEFINE_MUTEX(reg_lock); +-static int reg_refcount; /* Protected by reg_lock. */ ++static atomic_t reg_refcount; + + /* + * Size of the buffer for memcg path names. Ignoring stack trace support, +@@ -34,136 +27,22 @@ static int reg_refcount; /* Protected by + */ + #define MEMCG_PATH_BUF_SIZE MAX_FILTER_STR_VAL + +-/* +- * How many contexts our trace events might be called in: normal, softirq, irq, +- * and NMI. +- */ +-#define CONTEXT_COUNT 4 +- +-struct memcg_path { +- local_lock_t lock; +- char __rcu *buf; +- local_t buf_idx; +-}; +-static DEFINE_PER_CPU(struct memcg_path, memcg_paths) = { +- .lock = INIT_LOCAL_LOCK(lock), +- .buf_idx = LOCAL_INIT(0), +-}; +- +-static char **tmp_bufs; +- +-/* Called with reg_lock held. */ +-static void free_memcg_path_bufs(void) +-{ +- struct memcg_path *memcg_path; +- int cpu; +- char **old = tmp_bufs; +- +- for_each_possible_cpu(cpu) { +- memcg_path = per_cpu_ptr(&memcg_paths, cpu); +- *(old++) = rcu_dereference_protected(memcg_path->buf, +- lockdep_is_held(®_lock)); +- rcu_assign_pointer(memcg_path->buf, NULL); +- } +- +- /* Wait for inflight memcg_path_buf users to finish. */ +- synchronize_rcu(); +- +- old = tmp_bufs; +- for_each_possible_cpu(cpu) { +- kfree(*(old++)); +- } +- +- kfree(tmp_bufs); +- tmp_bufs = NULL; +-} +- + int trace_mmap_lock_reg(void) + { +- int cpu; +- char *new; +- +- mutex_lock(®_lock); +- +- /* If the refcount is going 0->1, proceed with allocating buffers. */ +- if (reg_refcount++) +- goto out; +- +- tmp_bufs = kmalloc_array(num_possible_cpus(), sizeof(*tmp_bufs), +- GFP_KERNEL); +- if (tmp_bufs == NULL) +- goto out_fail; +- +- for_each_possible_cpu(cpu) { +- new = kmalloc(MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_KERNEL); +- if (new == NULL) +- goto out_fail_free; +- rcu_assign_pointer(per_cpu_ptr(&memcg_paths, cpu)->buf, new); +- /* Don't need to wait for inflights, they'd have gotten NULL. */ +- } +- +-out: +- mutex_unlock(®_lock); ++ atomic_inc(®_refcount); + return 0; +- +-out_fail_free: +- free_memcg_path_bufs(); +-out_fail: +- /* Since we failed, undo the earlier ref increment. */ +- --reg_refcount; +- +- mutex_unlock(®_lock); +- return -ENOMEM; + } + + void trace_mmap_lock_unreg(void) + { +- mutex_lock(®_lock); +- +- /* If the refcount is going 1->0, proceed with freeing buffers. */ +- if (--reg_refcount) +- goto out; +- +- free_memcg_path_bufs(); +- +-out: +- mutex_unlock(®_lock); +-} +- +-static inline char *get_memcg_path_buf(void) +-{ +- struct memcg_path *memcg_path = this_cpu_ptr(&memcg_paths); +- char *buf; +- int idx; +- +- rcu_read_lock(); +- buf = rcu_dereference(memcg_path->buf); +- if (buf == NULL) { +- rcu_read_unlock(); +- return NULL; +- } +- idx = local_add_return(MEMCG_PATH_BUF_SIZE, &memcg_path->buf_idx) - +- MEMCG_PATH_BUF_SIZE; +- return &buf[idx]; ++ atomic_dec(®_refcount); + } + +-static inline void put_memcg_path_buf(void) +-{ +- local_sub(MEMCG_PATH_BUF_SIZE, &this_cpu_ptr(&memcg_paths)->buf_idx); +- rcu_read_unlock(); +-} +- +-#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \ +- do { \ +- const char *memcg_path; \ +- local_lock(&memcg_paths.lock); \ +- memcg_path = get_mm_memcg_path(mm); \ +- trace_mmap_lock_##type(mm, \ +- memcg_path != NULL ? memcg_path : "", \ +- ##__VA_ARGS__); \ +- if (likely(memcg_path != NULL)) \ +- put_memcg_path_buf(); \ +- local_unlock(&memcg_paths.lock); \ ++#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) 
\ ++ do { \ ++ char buf[MEMCG_PATH_BUF_SIZE]; \ ++ get_mm_memcg_path(mm, buf, sizeof(buf)); \ ++ trace_mmap_lock_##type(mm, buf, ##__VA_ARGS__); \ + } while (0) + + #else /* !CONFIG_MEMCG */ +@@ -185,37 +64,23 @@ void trace_mmap_lock_unreg(void) + #ifdef CONFIG_TRACING + #ifdef CONFIG_MEMCG + /* +- * Write the given mm_struct's memcg path to a percpu buffer, and return a +- * pointer to it. If the path cannot be determined, or no buffer was available +- * (because the trace event is being unregistered), NULL is returned. +- * +- * Note: buffers are allocated per-cpu to avoid locking, so preemption must be +- * disabled by the caller before calling us, and re-enabled only after the +- * caller is done with the pointer. +- * +- * The caller must call put_memcg_path_buf() once the buffer is no longer +- * needed. This must be done while preemption is still disabled. ++ * Write the given mm_struct's memcg path to a buffer. If the path cannot be ++ * determined or the trace event is being unregistered, empty string is written. + */ +-static const char *get_mm_memcg_path(struct mm_struct *mm) ++static void get_mm_memcg_path(struct mm_struct *mm, char *buf, size_t buflen) + { +- char *buf = NULL; +- struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm); ++ struct mem_cgroup *memcg; + ++ buf[0] = '\0'; ++ /* No need to get path if no trace event is registered. */ ++ if (!atomic_read(®_refcount)) ++ return; ++ memcg = get_mem_cgroup_from_mm(mm); + if (memcg == NULL) +- goto out; +- if (unlikely(memcg->css.cgroup == NULL)) +- goto out_put; +- +- buf = get_memcg_path_buf(); +- if (buf == NULL) +- goto out_put; +- +- cgroup_path(memcg->css.cgroup, buf, MEMCG_PATH_BUF_SIZE); +- +-out_put: ++ return; ++ if (memcg->css.cgroup) ++ cgroup_path(memcg->css.cgroup, buf, buflen); + css_put(&memcg->css); +-out: +- return buf; + } + + #endif /* CONFIG_MEMCG */ diff --git a/queue-6.6/series b/queue-6.6/series index 901eeb1d49c..d3bce88fc8e 100644 --- a/queue-6.6/series +++ b/queue-6.6/series @@ -355,3 +355,11 @@ s390-dasd-fix-error-checks-in-dasd_copy_pair_store.patch sbitmap-use-read_once-to-access-map-word.patch sbitmap-fix-io-hung-due-to-race-on-sbitmap_word-clea.patch loongarch-check-tif_load_watch-to-enable-user-space-.patch +landlock-don-t-lose-track-of-restrictions-on-cred_transfer.patch +hugetlb-force-allocating-surplus-hugepages-on-mempolicy-allowed-nodes.patch +mm-hugetlb-fix-possible-recursive-locking-detected-warning.patch +mm-mglru-fix-div-by-zero-in-vmpressure_calc_level.patch +mm-mmap_lock-replace-get_memcg_path_buf-with-on-stack-buffer.patch +mm-mglru-fix-overshooting-shrinker-memory.patch +x86-efistub-avoid-returning-efi_success-on-error.patch +x86-efistub-revert-to-heap-allocated-boot_params-for-pe-entrypoint.patch diff --git a/queue-6.6/x86-efistub-avoid-returning-efi_success-on-error.patch b/queue-6.6/x86-efistub-avoid-returning-efi_success-on-error.patch new file mode 100644 index 00000000000..a36d6912996 --- /dev/null +++ b/queue-6.6/x86-efistub-avoid-returning-efi_success-on-error.patch @@ -0,0 +1,40 @@ +From fb318ca0a522295edd6d796fb987e99ec41f0ee5 Mon Sep 17 00:00:00 2001 +From: Ard Biesheuvel +Date: Thu, 4 Jul 2024 10:59:23 +0200 +Subject: x86/efistub: Avoid returning EFI_SUCCESS on error + +From: Ard Biesheuvel + +commit fb318ca0a522295edd6d796fb987e99ec41f0ee5 upstream. + +The fail label is only used in a situation where the previous EFI API +call succeeded, and so status will be set to EFI_SUCCESS. 
Fix this, by +dropping the goto entirely, and call efi_exit() with the correct error +code. + +Signed-off-by: Ard Biesheuvel +Signed-off-by: Greg Kroah-Hartman +--- + drivers/firmware/efi/libstub/x86-stub.c | 5 +---- + 1 file changed, 1 insertion(+), 4 deletions(-) + +--- a/drivers/firmware/efi/libstub/x86-stub.c ++++ b/drivers/firmware/efi/libstub/x86-stub.c +@@ -501,16 +501,13 @@ efi_status_t __efiapi efi_pe_entry(efi_h + /* Convert unicode cmdline to ascii */ + cmdline_ptr = efi_convert_cmdline(image, &options_size); + if (!cmdline_ptr) +- goto fail; ++ efi_exit(handle, EFI_OUT_OF_RESOURCES); + + efi_set_u64_split((unsigned long)cmdline_ptr, &hdr->cmd_line_ptr, + &boot_params.ext_cmd_line_ptr); + + efi_stub_entry(handle, sys_table_arg, &boot_params); + /* not reached */ +- +-fail: +- efi_exit(handle, status); + } + + static void add_e820ext(struct boot_params *params, diff --git a/queue-6.6/x86-efistub-revert-to-heap-allocated-boot_params-for-pe-entrypoint.patch b/queue-6.6/x86-efistub-revert-to-heap-allocated-boot_params-for-pe-entrypoint.patch new file mode 100644 index 00000000000..4b141842a5c --- /dev/null +++ b/queue-6.6/x86-efistub-revert-to-heap-allocated-boot_params-for-pe-entrypoint.patch @@ -0,0 +1,76 @@ +From ae835a96d72cd025421910edb0e8faf706998727 Mon Sep 17 00:00:00 2001 +From: Ard Biesheuvel +Date: Fri, 22 Mar 2024 18:11:32 +0100 +Subject: x86/efistub: Revert to heap allocated boot_params for PE entrypoint + +From: Ard Biesheuvel + +commit ae835a96d72cd025421910edb0e8faf706998727 upstream. + +This is a partial revert of commit + + 8117961d98f ("x86/efi: Disregard setup header of loaded image") + +which triggers boot issues on older Dell laptops. As it turns out, +switching back to a heap allocation for the struct boot_params +constructed by the EFI stub works around this, even though it is unclear +why. 
+ +Cc: Christian Heusel +Reported-by: +Signed-off-by: Ard Biesheuvel +Signed-off-by: Greg Kroah-Hartman +--- + drivers/firmware/efi/libstub/x86-stub.c | 20 +++++++++++++++----- + 1 file changed, 15 insertions(+), 5 deletions(-) + +--- a/drivers/firmware/efi/libstub/x86-stub.c ++++ b/drivers/firmware/efi/libstub/x86-stub.c +@@ -469,11 +469,12 @@ void __noreturn efi_stub_entry(efi_handl + efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, + efi_system_table_t *sys_table_arg) + { +- static struct boot_params boot_params __page_aligned_bss; +- struct setup_header *hdr = &boot_params.hdr; + efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID; ++ struct boot_params *boot_params; ++ struct setup_header *hdr; + int options_size = 0; + efi_status_t status; ++ unsigned long alloc; + char *cmdline_ptr; + + if (efi_is_native()) +@@ -491,6 +492,13 @@ efi_status_t __efiapi efi_pe_entry(efi_h + efi_exit(handle, status); + } + ++ status = efi_allocate_pages(PARAM_SIZE, &alloc, ULONG_MAX); ++ if (status != EFI_SUCCESS) ++ efi_exit(handle, status); ++ ++ boot_params = memset((void *)alloc, 0x0, PARAM_SIZE); ++ hdr = &boot_params->hdr; ++ + /* Assign the setup_header fields that the kernel actually cares about */ + hdr->root_flags = 1; + hdr->vid_mode = 0xffff; +@@ -500,13 +508,15 @@ efi_status_t __efiapi efi_pe_entry(efi_h + + /* Convert unicode cmdline to ascii */ + cmdline_ptr = efi_convert_cmdline(image, &options_size); +- if (!cmdline_ptr) ++ if (!cmdline_ptr) { ++ efi_free(PARAM_SIZE, alloc); + efi_exit(handle, EFI_OUT_OF_RESOURCES); ++ } + + efi_set_u64_split((unsigned long)cmdline_ptr, &hdr->cmd_line_ptr, +- &boot_params.ext_cmd_line_ptr); ++ &boot_params->ext_cmd_line_ptr); + +- efi_stub_entry(handle, sys_table_arg, &boot_params); ++ efi_stub_entry(handle, sys_table_arg, boot_params); + /* not reached */ + } +