git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm, PM: use for_each_valid_pfn() in kernel/power/snapshot.c
author David Woodhouse <dwmw@amazon.co.uk>
Wed, 23 Apr 2025 13:33:40 +0000 (14:33 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 13 May 2025 06:50:44 +0000 (23:50 -0700)
Link: https://lkml.kernel.org/r/20250423133821.789413-5-dwmw2@infradead.org
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Ruihan Li <lrh2000@pku.edu.cn>
Cc: Will Deacon <will@kernel.org>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
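
For reference, the new iterator subsumes the "walk every PFN and test pfn_valid()" pattern that both hunks below delete. A minimal sketch of the behaviour a caller can assume (the generic fallback form; configurations such as SPARSEMEM may provide a faster implementation that skips whole invalid ranges, and start_pfn/end_pfn here merely stand in for the bounds used in the diff):

	unsigned long pfn;

	/*
	 * Open-coded equivalent of for_each_valid_pfn(pfn, start_pfn, end_pfn):
	 * visit only PFNs backed by a struct page, exactly like the loops
	 * being removed below.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		/* loop body runs here, e.g. mem_bm_set_bit_check(bm, pfn); */
	}
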
kernel/power/snapshot.c

index 4e6e24e8b85441ffe1ae515db268fe495517a686..2af36cfe35cdf4befb803d97a33ce1a0ce1a717c 100644
@@ -1094,16 +1094,15 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
                         ((unsigned long long) region->end_pfn << PAGE_SHIFT)
                                - 1);
 
-               for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
-                       if (pfn_valid(pfn)) {
-                               /*
-                                * It is safe to ignore the result of
-                                * mem_bm_set_bit_check() here, since we won't
-                                * touch the PFNs for which the error is
-                                * returned anyway.
-                                */
-                               mem_bm_set_bit_check(bm, pfn);
-                       }
+               for_each_valid_pfn(pfn, region->start_pfn, region->end_pfn) {
+                       /*
+                        * It is safe to ignore the result of
+                        * mem_bm_set_bit_check() here, since we won't
+                        * touch the PFNs for which the error is
+                        * returned anyway.
+                        */
+                       mem_bm_set_bit_check(bm, pfn);
+               }
        }
 }
 
@@ -1255,21 +1254,20 @@ static void mark_free_pages(struct zone *zone)
        spin_lock_irqsave(&zone->lock, flags);
 
        max_zone_pfn = zone_end_pfn(zone);
-       for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-               if (pfn_valid(pfn)) {
-                       page = pfn_to_page(pfn);
+       for_each_valid_pfn(pfn, zone->zone_start_pfn, max_zone_pfn) {
+               page = pfn_to_page(pfn);
 
-                       if (!--page_count) {
-                               touch_nmi_watchdog();
-                               page_count = WD_PAGE_COUNT;
-                       }
+               if (!--page_count) {
+                       touch_nmi_watchdog();
+                       page_count = WD_PAGE_COUNT;
+               }
 
-                       if (page_zone(page) != zone)
-                               continue;
+               if (page_zone(page) != zone)
+                       continue;
 
-                       if (!swsusp_page_is_forbidden(page))
-                               swsusp_unset_page_free(page);
-               }
+               if (!swsusp_page_is_forbidden(page))
+                       swsusp_unset_page_free(page);
+       }
 
        for_each_migratetype_order(order, t) {
                list_for_each_entry(page,