Merge tag 'mm-hotfixes-stable-2024-02-22-15-02' of git://git.kernel.org/pub/scm/linux...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 23 Feb 2024 17:43:21 +0000 (09:43 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 23 Feb 2024 17:43:21 +0000 (09:43 -0800)
Pull misc fixes from Andrew Morton:
 "A batch of MM (and one non-MM) hotfixes.

  Ten are cc:stable and the remainder address post-6.7 issues or aren't
  considered appropriate for backporting"

* tag 'mm-hotfixes-stable-2024-02-22-15-02' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  kasan: guard release_free_meta() shadow access with kasan_arch_is_ready()
  mm/damon/lru_sort: fix quota status loss due to online tunings
  mm/damon/reclaim: fix quota status loss due to online tunings
  MAINTAINERS: mailmap: update Shakeel's email address
  mm/damon/sysfs-schemes: handle schemes sysfs dir removal before commit_schemes_quota_goals
  mm: memcontrol: clarify swapaccount=0 deprecation warning
  mm/memblock: add MEMBLOCK_RSRV_NOINIT into flagname[] array
  mm/zswap: invalidate duplicate entry when !zswap_enabled
  lib/Kconfig.debug: TEST_IOV_ITER depends on MMU
  mm/swap: fix race when skipping swapcache
  mm/swap_state: update zswap LRU's protection range with the folio locked
  selftests/mm: uffd-unit-test check if huge page size is 0
  mm/damon/core: check apply interval in damon_do_apply_schemes()
  mm: zswap: fix missing folio cleanup in writeback race path

17 files changed:
.mailmap
MAINTAINERS
include/linux/swap.h
lib/Kconfig.debug
mm/damon/core.c
mm/damon/lru_sort.c
mm/damon/reclaim.c
mm/damon/sysfs-schemes.c
mm/kasan/generic.c
mm/memblock.c
mm/memcontrol.c
mm/memory.c
mm/swap.h
mm/swap_state.c
mm/swapfile.c
mm/zswap.c
tools/testing/selftests/mm/uffd-unit-tests.c

index b99a238ee3bde17fdf4e0f6b9ca0aee81e1dc9a7..08f28f2999f0dc5d64cb5d04a77c7c3eab78130a 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -553,6 +553,7 @@ Senthilkumar N L <quic_snlakshm@quicinc.com> <snlakshm@codeaurora.org>
 Serge Hallyn <sergeh@kernel.org> <serge.hallyn@canonical.com>
 Serge Hallyn <sergeh@kernel.org> <serue@us.ibm.com>
 Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
+Shakeel Butt <shakeel.butt@linux.dev> <shakeelb@google.com>
 Shannon Nelson <shannon.nelson@amd.com> <snelson@pensando.io>
 Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@intel.com>
 Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@oracle.com>
index bf77be03fb2b3c3564c497fa1eac1edfd3b9cbca..189184ecdb11535e701907e12b778ce1a1883e6b 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5378,7 +5378,7 @@ CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
 M:     Johannes Weiner <hannes@cmpxchg.org>
 M:     Michal Hocko <mhocko@kernel.org>
 M:     Roman Gushchin <roman.gushchin@linux.dev>
-M:     Shakeel Butt <shakeelb@google.com>
+M:     Shakeel Butt <shakeel.butt@linux.dev>
 R:     Muchun Song <muchun.song@linux.dev>
 L:     cgroups@vger.kernel.org
 L:     linux-mm@kvack.org
index 4db00ddad26169060e1d42d5ca9b9723546ab81c..8d28f6091a320ef024597dfd6f84526702d5ec41 100644 (file)
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -549,6 +549,11 @@ static inline int swap_duplicate(swp_entry_t swp)
        return 0;
 }
 
+static inline int swapcache_prepare(swp_entry_t swp)
+{
+       return 0;
+}
+
 static inline void swap_free(swp_entry_t swp)
 {
 }
index 975a07f9f1cc08838d272f83d5f04a85ff2f5cd2..ef36b829ae1f55bcfe4c58b567ded4fc348db0af 100644 (file)
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2235,6 +2235,7 @@ config TEST_DIV64
 config TEST_IOV_ITER
        tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS
        depends on KUNIT
+       depends on MMU
        default KUNIT_ALL_TESTS
        help
          Enable this to turn on testing of the operation of the I/O iterator
index 36f6f1d21ff069de12575a4f0d932e0dfc316c11..5b325749fc12597ddd273ae605bdb1c04a93f99e 100644 (file)
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -1026,6 +1026,9 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
        damon_for_each_scheme(s, c) {
                struct damos_quota *quota = &s->quota;
 
+               if (c->passed_sample_intervals != s->next_apply_sis)
+                       continue;
+
                if (!s->wmarks.activated)
                        continue;
 
@@ -1176,10 +1179,6 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
                if (c->passed_sample_intervals != s->next_apply_sis)
                        continue;
 
-               s->next_apply_sis +=
-                       (s->apply_interval_us ? s->apply_interval_us :
-                        c->attrs.aggr_interval) / sample_interval;
-
                if (!s->wmarks.activated)
                        continue;
 
@@ -1195,6 +1194,14 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
                damon_for_each_region_safe(r, next_r, t)
                        damon_do_apply_schemes(c, t, r);
        }
+
+       damon_for_each_scheme(s, c) {
+               if (c->passed_sample_intervals != s->next_apply_sis)
+                       continue;
+               s->next_apply_sis +=
+                       (s->apply_interval_us ? s->apply_interval_us :
+                        c->attrs.aggr_interval) / sample_interval;
+       }
 }
 
 /*
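
The two mm/damon/core.c hunks above split the scheme-application bookkeeping in two: damon_do_apply_schemes() now skips schemes whose apply interval has not yet elapsed, and kdamond_apply_schemes() advances next_apply_sis only in a separate pass after all due schemes have been applied. A minimal sketch of that run-then-advance discipline, using hypothetical types rather than the DAMON structures:

struct task {
        unsigned long next_due;         /* sample count at which to run */
        unsigned long interval;         /* samples between runs */
        void (*run)(void *arg);
        void *arg;
};

static void run_due_tasks(struct task *tasks, int nr, unsigned long now)
{
        int i;

        /* first pass: run only the tasks whose deadline is exactly now */
        for (i = 0; i < nr; i++) {
                if (now != tasks[i].next_due)
                        continue;
                tasks[i].run(tasks[i].arg);
        }

        /* second pass: advance the deadlines of the tasks that just ran */
        for (i = 0; i < nr; i++) {
                if (now != tasks[i].next_due)
                        continue;
                tasks[i].next_due += tasks[i].interval;
        }
}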
index f2e5f9431892eb207bec1da87224282e3de27371..3de2916a65c38c372b5ed8472b7a87b34026aed7 100644 (file)
--- a/mm/damon/lru_sort.c
+++ b/mm/damon/lru_sort.c
@@ -185,9 +185,21 @@ static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
        return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_DEPRIO);
 }
 
+static void damon_lru_sort_copy_quota_status(struct damos_quota *dst,
+               struct damos_quota *src)
+{
+       dst->total_charged_sz = src->total_charged_sz;
+       dst->total_charged_ns = src->total_charged_ns;
+       dst->charged_sz = src->charged_sz;
+       dst->charged_from = src->charged_from;
+       dst->charge_target_from = src->charge_target_from;
+       dst->charge_addr_from = src->charge_addr_from;
+}
+
 static int damon_lru_sort_apply_parameters(void)
 {
-       struct damos *scheme;
+       struct damos *scheme, *hot_scheme, *cold_scheme;
+       struct damos *old_hot_scheme = NULL, *old_cold_scheme = NULL;
        unsigned int hot_thres, cold_thres;
        int err = 0;
 
@@ -195,18 +207,35 @@ static int damon_lru_sort_apply_parameters(void)
        if (err)
                return err;
 
+       damon_for_each_scheme(scheme, ctx) {
+               if (!old_hot_scheme) {
+                       old_hot_scheme = scheme;
+                       continue;
+               }
+               old_cold_scheme = scheme;
+       }
+
        hot_thres = damon_max_nr_accesses(&damon_lru_sort_mon_attrs) *
                hot_thres_access_freq / 1000;
-       scheme = damon_lru_sort_new_hot_scheme(hot_thres);
-       if (!scheme)
+       hot_scheme = damon_lru_sort_new_hot_scheme(hot_thres);
+       if (!hot_scheme)
                return -ENOMEM;
-       damon_set_schemes(ctx, &scheme, 1);
+       if (old_hot_scheme)
+               damon_lru_sort_copy_quota_status(&hot_scheme->quota,
+                               &old_hot_scheme->quota);
 
        cold_thres = cold_min_age / damon_lru_sort_mon_attrs.aggr_interval;
-       scheme = damon_lru_sort_new_cold_scheme(cold_thres);
-       if (!scheme)
+       cold_scheme = damon_lru_sort_new_cold_scheme(cold_thres);
+       if (!cold_scheme) {
+               damon_destroy_scheme(hot_scheme);
                return -ENOMEM;
-       damon_add_scheme(ctx, scheme);
+       }
+       if (old_cold_scheme)
+               damon_lru_sort_copy_quota_status(&cold_scheme->quota,
+                               &old_cold_scheme->quota);
+
+       damon_set_schemes(ctx, &hot_scheme, 1);
+       damon_add_scheme(ctx, cold_scheme);
 
        return damon_set_region_biggest_system_ram_default(target,
                                        &monitor_region_start,
index ab974e477d2f2850f642fbbafac48a8b3a5d136b..66e190f0374ac84b47100b8ba21fe4c32e104891 100644 (file)
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -150,9 +150,20 @@ static struct damos *damon_reclaim_new_scheme(void)
                        &damon_reclaim_wmarks);
 }
 
+static void damon_reclaim_copy_quota_status(struct damos_quota *dst,
+               struct damos_quota *src)
+{
+       dst->total_charged_sz = src->total_charged_sz;
+       dst->total_charged_ns = src->total_charged_ns;
+       dst->charged_sz = src->charged_sz;
+       dst->charged_from = src->charged_from;
+       dst->charge_target_from = src->charge_target_from;
+       dst->charge_addr_from = src->charge_addr_from;
+}
+
 static int damon_reclaim_apply_parameters(void)
 {
-       struct damos *scheme;
+       struct damos *scheme, *old_scheme;
        struct damos_filter *filter;
        int err = 0;
 
@@ -164,6 +175,11 @@ static int damon_reclaim_apply_parameters(void)
        scheme = damon_reclaim_new_scheme();
        if (!scheme)
                return -ENOMEM;
+       if (!list_empty(&ctx->schemes)) {
+               damon_for_each_scheme(old_scheme, ctx)
+                       damon_reclaim_copy_quota_status(&scheme->quota,
+                                       &old_scheme->quota);
+       }
        if (skip_anon) {
                filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
                if (!filter) {
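
The mm/damon/lru_sort.c and mm/damon/reclaim.c hunks above fix the same bug: applying parameters online rebuilds the schemes from scratch, and a freshly created scheme starts with a zeroed quota, so whatever had already been charged against the quota was lost. The new *_copy_quota_status() helpers carry the charge-tracking fields from the old scheme into its replacement. The idea, reduced to a sketch with hypothetical types (not struct damos_quota):

struct limiter {
        unsigned long limit;            /* configured budget per window */
        unsigned long consumed;         /* amount charged in this window */
        unsigned long window_started;   /* when the current window began */
};

/* preserve accounting when a limiter is torn down and rebuilt */
static void limiter_copy_charge(struct limiter *new, const struct limiter *old)
{
        new->consumed = old->consumed;
        new->window_started = old->window_started;
        /* new->limit intentionally keeps the freshly configured value */
}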
index dd2fb512700920803b10621b82ffaa88bff30a92..ae0f0b314f3a9a5ec251021d0fb68d423fa53cd7 100644 (file)
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -1905,6 +1905,10 @@ void damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes,
        damon_for_each_scheme(scheme, ctx) {
                struct damon_sysfs_scheme *sysfs_scheme;
 
+               /* user could have removed the scheme sysfs dir */
+               if (i >= sysfs_schemes->nr)
+                       break;
+
                sysfs_scheme = sysfs_schemes->schemes_arr[i];
                damos_sysfs_set_quota_score(sysfs_scheme->quotas->goals,
                                &scheme->quota);
index df6627f62402c01dab04e6955bf80e7fb4b4b2ae..032bf3e98c240183f13fc7cb8d6f42d1f797be34 100644 (file)
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -522,6 +522,9 @@ static void release_alloc_meta(struct kasan_alloc_meta *meta)
 
 static void release_free_meta(const void *object, struct kasan_free_meta *meta)
 {
+       if (!kasan_arch_is_ready())
+               return;
+
        /* Check if free meta is valid. */
        if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
                return;
index 4dcb2ee35eca856a43694f4402dea0c1c9bf6d8a..d9f4b82cbffeb8bc5ba3b0be1f9aac5d4337da7d 100644 (file)
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -2249,6 +2249,7 @@ static const char * const flagname[] = {
        [ilog2(MEMBLOCK_MIRROR)] = "MIRROR",
        [ilog2(MEMBLOCK_NOMAP)] = "NOMAP",
        [ilog2(MEMBLOCK_DRIVER_MANAGED)] = "DRV_MNG",
+       [ilog2(MEMBLOCK_RSRV_NOINIT)] = "RSV_NIT",
 };
 
 static int memblock_debug_show(struct seq_file *m, void *private)
index 1ed40f9d3a277ec8912c77326c5527a259a96c47..61932c9215e7734e4dfc7dc6e427c3692d1c3c6f 100644 (file)
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7971,9 +7971,13 @@ bool mem_cgroup_swap_full(struct folio *folio)
 
 static int __init setup_swap_account(char *s)
 {
-       pr_warn_once("The swapaccount= commandline option is deprecated. "
-                    "Please report your usecase to linux-mm@kvack.org if you "
-                    "depend on this functionality.\n");
+       bool res;
+
+       if (!kstrtobool(s, &res) && !res)
+               pr_warn_once("The swapaccount=0 commandline option is deprecated "
+                            "in favor of configuring swap control via cgroupfs. "
+                            "Please report your usecase to linux-mm@kvack.org if you "
+                            "depend on this functionality.\n");
        return 1;
 }
 __setup("swapaccount=", setup_swap_account);
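
The mm/memcontrol.c hunk above narrows the deprecation warning: the value is now parsed, and only an explicit false value ("swapaccount=0" and friends) triggers the message, while "swapaccount=1" stays silent. A rough user-space approximation of that parse-then-warn logic (kstrtobool semantics are only approximated by the hypothetical helper below; this is not the kernel function):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* crude stand-in for kstrtobool(): 0 on success, -1 on unparsable input */
static int parse_bool(const char *s, bool *res)
{
        if (!strcmp(s, "1") || !strcmp(s, "y") || !strcmp(s, "on")) {
                *res = true;
                return 0;
        }
        if (!strcmp(s, "0") || !strcmp(s, "n") || !strcmp(s, "off")) {
                *res = false;
                return 0;
        }
        return -1;
}

int main(void)
{
        bool res;

        /* only an explicit "0" (or equivalent) produces the warning */
        if (!parse_bool("0", &res) && !res)
                fprintf(stderr, "swapaccount=0 is deprecated\n");
        return 0;
}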
index 15f8b10ea17c4f28e857009372f50bfd774001b9..0bfc8b007c01a3323a15a17d51c4da46a6207540 100644 (file)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3799,6 +3799,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
        struct page *page;
        struct swap_info_struct *si = NULL;
        rmap_t rmap_flags = RMAP_NONE;
+       bool need_clear_cache = false;
        bool exclusive = false;
        swp_entry_t entry;
        pte_t pte;
@@ -3867,6 +3868,20 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
        if (!folio) {
                if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
                    __swap_count(entry) == 1) {
+                       /*
+                        * Prevent parallel swapin from proceeding with
+                        * the cache flag. Otherwise, another thread may
+                        * finish swapin first, free the entry, and swapout
+                        * reusing the same entry. It's undetectable as
+                        * pte_same() returns true due to entry reuse.
+                        */
+                       if (swapcache_prepare(entry)) {
+                               /* Relax a bit to prevent rapid repeated page faults */
+                               schedule_timeout_uninterruptible(1);
+                               goto out;
+                       }
+                       need_clear_cache = true;
+
                        /* skip swapcache */
                        folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
                                                vma, vmf->address, false);
@@ -4117,6 +4132,9 @@ unlock:
        if (vmf->pte)
                pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
+       /* Clear the swap cache pin for direct swapin after PTL unlock */
+       if (need_clear_cache)
+               swapcache_clear(si, entry);
        if (si)
                put_swap_device(si);
        return ret;
@@ -4131,6 +4149,8 @@ out_release:
                folio_unlock(swapcache);
                folio_put(swapcache);
        }
+       if (need_clear_cache)
+               swapcache_clear(si, entry);
        if (si)
                put_swap_device(si);
        return ret;
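
The mm/memory.c hunk above closes a race on the SWP_SYNCHRONOUS_IO skip-swapcache path: two threads could swap in the same entry in parallel, one could finish first, free the entry, and the slot could be reused for an unrelated page; the later pte_same() check cannot catch that because the reused entry compares equal. swapcache_prepare() therefore acts as a per-entry claim, losers back off for a tick and retake the fault, and the winner drops the claim with swapcache_clear() once the page table lock has been released. The same claim/back-off shape as a self-contained user-space sketch using C11 atomics (hypothetical names, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag entry_claim = ATOMIC_FLAG_INIT;

/* returns false if another thread already owns the one-shot work */
static bool claim_and_run(void (*work)(void))
{
        if (atomic_flag_test_and_set(&entry_claim))
                return false;           /* lost the race: back off and retry */
        work();                         /* e.g. the swapcache-skipping swapin */
        atomic_flag_clear(&entry_claim); /* cf. swapcache_clear() after unlock */
        return true;
}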
index 758c46ca671ed110ae8e25fad48196d3feed03dc..fc2f6ade7f80b399707bcc67c44f813aea0b846d 100644 (file)
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -41,6 +41,7 @@ void __delete_from_swap_cache(struct folio *folio,
 void delete_from_swap_cache(struct folio *folio);
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
                                  unsigned long end);
+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry);
 struct folio *swap_cache_get_folio(swp_entry_t entry,
                struct vm_area_struct *vma, unsigned long addr);
 struct folio *filemap_get_incore_folio(struct address_space *mapping,
@@ -97,6 +98,10 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
        return 0;
 }
 
+static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
+{
+}
+
 static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
                struct vm_area_struct *vma, unsigned long addr)
 {
index e671266ad77241f461a17cbb2e486fe48a423f69..7255c01a1e4e16d758186019f904e70a7890a5cc 100644 (file)
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -680,9 +680,10 @@ skip:
        /* The page was likely read above, so no need for plugging here */
        folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
                                        &page_allocated, false);
-       if (unlikely(page_allocated))
+       if (unlikely(page_allocated)) {
+               zswap_folio_swapin(folio);
                swap_read_folio(folio, false, NULL);
-       zswap_folio_swapin(folio);
+       }
        return folio;
 }
 
@@ -855,9 +856,10 @@ skip:
        /* The folio was likely read above, so no need for plugging here */
        folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
                                        &page_allocated, false);
-       if (unlikely(page_allocated))
+       if (unlikely(page_allocated)) {
+               zswap_folio_swapin(folio);
                swap_read_folio(folio, false, NULL);
-       zswap_folio_swapin(folio);
+       }
        return folio;
 }
 
index 556ff7347d5f04402b61cc5bd9d0d123a36dc1d5..746aa9da530255035b4624fefff862d416af836d 100644 (file)
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3365,6 +3365,19 @@ int swapcache_prepare(swp_entry_t entry)
        return __swap_duplicate(entry, SWAP_HAS_CACHE);
 }
 
+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
+{
+       struct swap_cluster_info *ci;
+       unsigned long offset = swp_offset(entry);
+       unsigned char usage;
+
+       ci = lock_cluster_or_swap_info(si, offset);
+       usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE);
+       unlock_cluster_or_swap_info(si, ci);
+       if (!usage)
+               free_swap_slot(entry);
+}
+
 struct swap_info_struct *swp_swap_info(swp_entry_t entry)
 {
        return swap_type_to_swap_info(swp_type(entry));
index 350dd2fc815994739d2012e0bcf483445350bb88..db4625af65fb7f6655a057e145bbe20dd64f7ae9 100644 (file)
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -377,10 +377,9 @@ void zswap_folio_swapin(struct folio *folio)
 {
        struct lruvec *lruvec;
 
-       if (folio) {
-               lruvec = folio_lruvec(folio);
-               atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
-       }
+       VM_WARN_ON_ONCE(!folio_test_locked(folio));
+       lruvec = folio_lruvec(folio);
+       atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
 }
 
 /*********************************
@@ -1440,6 +1439,8 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
        if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
                spin_unlock(&tree->lock);
                delete_from_swap_cache(folio);
+               folio_unlock(folio);
+               folio_put(folio);
                return -ENOMEM;
        }
        spin_unlock(&tree->lock);
@@ -1517,7 +1518,7 @@ bool zswap_store(struct folio *folio)
        if (folio_test_large(folio))
                return false;
 
-       if (!zswap_enabled || !tree)
+       if (!tree)
                return false;
 
        /*
@@ -1532,6 +1533,10 @@ bool zswap_store(struct folio *folio)
                zswap_invalidate_entry(tree, dupentry);
        }
        spin_unlock(&tree->lock);
+
+       if (!zswap_enabled)
+               return false;
+
        objcg = get_obj_cgroup_from_folio(folio);
        if (objcg && !obj_cgroup_may_zswap(objcg)) {
                memcg = get_mem_cgroup_from_objcg(objcg);
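
The mm/zswap.c hunks above tighten three things: zswap_folio_swapin() now expects a locked folio (matching the mm/swap_state.c callers, which only invoke it for a folio they just allocated), the writeback race path now unlocks and drops the folio it holds instead of leaking it, and zswap_store() keeps invalidating a duplicate entry even when zswap is disabled, so a later load can never return a compressed copy older than the data just written to the backing device. That last ordering as a compact sketch with a hypothetical cache API (not the zswap one): drop the stale copy first, only then honour the enable switch.

#include <stdbool.h>

struct kv_cache {
        bool enabled;
};

/* placeholders for the real cache operations */
static void kv_cache_invalidate(struct kv_cache *c, unsigned long key) { }
static bool kv_cache_insert(struct kv_cache *c, unsigned long key,
                            const void *data) { return true; }

static bool kv_cache_store(struct kv_cache *c, unsigned long key,
                           const void *data)
{
        /* drop any older copy of @key first, even when caching is off,
         * so a later lookup cannot return data staler than the writeback */
        kv_cache_invalidate(c, key);

        if (!c->enabled)
                return false;   /* caller falls back to the backing store */

        return kv_cache_insert(c, key, data);
}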
index cce90a10515ad2fe78fe68147d26732d717c3bb6..2b9f8cc52639d1942238b41a1ad55edc6bd406ed 100644 (file)
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -1517,6 +1517,12 @@ int main(int argc, char *argv[])
                                continue;
 
                        uffd_test_start("%s on %s", test->name, mem_type->name);
+                       if ((mem_type->mem_flag == MEM_HUGETLB ||
+                           mem_type->mem_flag == MEM_HUGETLB_PRIVATE) &&
+                           (default_huge_page_size() == 0)) {
+                               uffd_test_skip("huge page size is 0, feature missing?");
+                               continue;
+                       }
                        if (!uffd_feature_supported(test)) {
                                uffd_test_skip("feature missing");
                                continue;