mm/page_alloc: convert zone_pcp_update() to rely on memory barriers instead of stop_machine()
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 97b8f861e63d49c6ede9eafc46a9b11410e3a281..8125263be60f98f848acd1ee375a3d8d3f199baa 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6085,33 +6085,18 @@ void free_contig_range(unsigned long pfn, unsigned nr_pages)
 #endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-static int __meminit __zone_pcp_update(void *data)
-{
-       struct zone *zone = data;
-       int cpu;
-       unsigned long batch = zone_batchsize(zone), flags;
-
-       for_each_possible_cpu(cpu) {
-               struct per_cpu_pageset *pset;
-               struct per_cpu_pages *pcp;
-
-               pset = per_cpu_ptr(zone->pageset, cpu);
-               pcp = &pset->pcp;
-
-               local_irq_save(flags);
-               if (pcp->count > 0)
-                       free_pcppages_bulk(zone, pcp->count, pcp);
-               drain_zonestat(zone, pset);
-               setup_pageset(pset, batch);
-               local_irq_restore(flags);
-       }
-       return 0;
-}
-
+/*
+ * The zone indicated has a new number of managed_pages; batch sizes and percpu
+ * page high values need to be recalculated.
+ */
 void __meminit zone_pcp_update(struct zone *zone)
 {
+       unsigned cpu;
+       unsigned long batch;
        mutex_lock(&pcp_batch_high_lock);
-       stop_machine(__zone_pcp_update, zone, NULL);
+       batch = zone_batchsize(zone);
+       for_each_possible_cpu(cpu)
+               pageset_set_batch(per_cpu_ptr(zone->pageset, cpu), batch);
        mutex_unlock(&pcp_batch_high_lock);
 }
 #endif
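
Why stop_machine() is no longer needed here: pcp->batch and pcp->high can be updated on a live pageset as long as a reader on another CPU never observes an inconsistent (high, batch) pair. The sketch below shows the barrier-based helper this change relies on; pageset_update() is the helper introduced earlier in this patch series, but the body shown is an illustrative reconstruction under that assumption, not a verbatim copy of the tree at this commit.

static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
			   unsigned long batch)
{
	/*
	 * Drop to the fail-safe batch of 1 first, so a concurrent reader
	 * can never pair the old high with the new batch (or vice versa).
	 */
	pcp->batch = 1;
	smp_wmb();

	/* Publish the new high watermark before the real batch value. */
	pcp->high = high;
	smp_wmb();

	pcp->batch = batch;
}

Readers are expected to load pcp->batch once (e.g. via ACCESS_ONCE()) and tolerate the transient fail-safe value, so an allocation or free path caught mid-update simply behaves conservatively. No cross-CPU serialization is required, which is what lets zone_pcp_update() drop stop_machine().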