mm: rename and change semantics of nr_indirectly_reclaimable_bytes
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e2ef1c17942fa6934af3fe5dfe99c630002b8e80..20f25d06c00c14dd3a83cd9e21bb8680cd00135c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3922,6 +3922,7 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
 {
        struct zone *zone;
        struct zoneref *z;
+       bool ret = false;
 
        /*
         * Costly allocations might have made a progress but this doesn't mean
@@ -3985,25 +3986,24 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
                                }
                        }
 
-                       /*
-                        * Memory allocation/reclaim might be called from a WQ
-                        * context and the current implementation of the WQ
-                        * concurrency control doesn't recognize that
-                        * a particular WQ is congested if the worker thread is
-                        * looping without ever sleeping. Therefore we have to
-                        * do a short sleep here rather than calling
-                        * cond_resched().
-                        */
-                       if (current->flags & PF_WQ_WORKER)
-                               schedule_timeout_uninterruptible(1);
-                       else
-                               cond_resched();
-
-                       return true;
+                       ret = true;
+                       goto out;
                }
        }
 
-       return false;
+out:
+       /*
+        * Memory allocation/reclaim might be called from a WQ context and the
+        * current implementation of the WQ concurrency control doesn't
+        * recognize that a particular WQ is congested if the worker thread is
+        * looping without ever sleeping. Therefore we have to do a short sleep
+        * here rather than calling cond_resched().
+        */
+       if (current->flags & PF_WQ_WORKER)
+               schedule_timeout_uninterruptible(1);
+       else
+               cond_resched();
+       return ret;
 }
 
 static inline bool
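
The restructured exit path in the hunk above means the short sleep (or cond_resched()) now runs on every return from should_reclaim_retry(), not only when the function recommends another retry. A minimal userspace sketch of the resulting control flow, with the PF_WQ_WORKER test replaced by a plain flag and the scheduler primitives stubbed out as prints (all names in this model are illustrative, not from the patch):

#include <stdbool.h>
#include <stdio.h>

/* Simplified model of the patched should_reclaim_retry() exit path.
 * zone_worth_retrying stands in for the watermark scan over the
 * zonelist; is_wq_worker stands in for (current->flags & PF_WQ_WORKER). */
static bool should_reclaim_retry_model(bool zone_worth_retrying, bool is_wq_worker)
{
	bool ret = false;

	if (zone_worth_retrying) {
		/* In the real function this is the "goto out" that leaves the
		 * zonelist scan once a zone still looks reclaimable. */
		ret = true;
		goto out;
	}

out:
	/* After the patch this block runs on every exit, so a looping
	 * workqueue worker sleeps briefly even when no retry is recommended. */
	if (is_wq_worker)
		printf("schedule_timeout_uninterruptible(1)\n");
	else
		printf("cond_resched()\n");
	return ret;
}

int main(void)
{
	printf("retry=%d\n", should_reclaim_retry_model(true, true));
	printf("retry=%d\n", should_reclaim_retry_model(false, true));
	return 0;
}
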
@@ -4701,6 +4701,7 @@ long si_mem_available(void)
        unsigned long pagecache;
        unsigned long wmark_low = 0;
        unsigned long pages[NR_LRU_LISTS];
+       unsigned long reclaimable;
        struct zone *zone;
        int lru;
 
@@ -4726,19 +4727,13 @@ long si_mem_available(void)
        available += pagecache;
 
        /*
-        * Part of the reclaimable slab consists of items that are in use,
-        * and cannot be freed. Cap this estimate at the low watermark.
+        * Part of the reclaimable slab and other kernel memory consists of
+        * items that are in use, and cannot be freed. Cap this estimate at the
+        * low watermark.
         */
-       available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
-                    min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
-                        wmark_low);
-
-       /*
-        * Part of the kernel memory, which can be released under memory
-        * pressure.
-        */
-       available += global_node_page_state(NR_INDIRECTLY_RECLAIMABLE_BYTES) >>
-               PAGE_SHIFT;
+       reclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE) +
+                       global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
+       available += reclaimable - min(reclaimable / 2, wmark_low);
 
        if (available < 0)
                available = 0;
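
The rewritten si_mem_available() estimate sums the reclaimable slab with the new NR_KERNEL_MISC_RECLAIMABLE counter (already kept in pages, so the old >> PAGE_SHIFT conversion of the bytes counter goes away) and applies the low-watermark cap once to the combined figure. A small sketch of that arithmetic with made-up counter values (names and numbers are illustrative only):

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Illustrative values, in pages. */
	unsigned long slab_reclaimable = 40000;  /* NR_SLAB_RECLAIMABLE */
	unsigned long misc_reclaimable = 10000;  /* NR_KERNEL_MISC_RECLAIMABLE */
	unsigned long wmark_low        = 20000;  /* sum of zone low watermarks */
	unsigned long available        = 100000; /* free pages + pagecache so far */

	/* Combined reclaimable kernel memory, capped at the low watermark,
	 * since part of it is in use and cannot actually be freed. */
	unsigned long reclaimable = slab_reclaimable + misc_reclaimable;
	available += reclaimable - min_ul(reclaimable / 2, wmark_low);

	/* 40000 + 10000 = 50000; min(25000, 20000) = 20000;
	 * so available grows by 30000 pages. */
	printf("available = %lu pages\n", available);
	return 0;
}
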
@@ -6803,15 +6798,12 @@ static void check_for_memory(pg_data_t *pgdat, int nid)
 {
        enum zone_type zone_type;
 
-       if (N_MEMORY == N_NORMAL_MEMORY)
-               return;
-
        for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
                struct zone *zone = &pgdat->node_zones[zone_type];
                if (populated_zone(zone)) {
-                       node_set_state(nid, N_HIGH_MEMORY);
-                       if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
-                           zone_type <= ZONE_NORMAL)
+                       if (IS_ENABLED(CONFIG_HIGHMEM))
+                               node_set_state(nid, N_HIGH_MEMORY);
+                       if (zone_type <= ZONE_NORMAL)
                                node_set_state(nid, N_NORMAL_MEMORY);
                        break;
                }