s390/boot: rename mem_detect to physmem_info
author    Vasily Gorbik <gor@linux.ibm.com>
          Wed, 8 Feb 2023 17:11:25 +0000 (18:11 +0100)
committer Heiko Carstens <hca@linux.ibm.com>
          Mon, 20 Mar 2023 10:02:50 +0000 (11:02 +0100)
In preparation for extending mem_detect with additional information, such as
reserved ranges, rename it to the more generic physmem_info. The new naming
also helps to avoid confusion by using more exact terms such as "physmem
online ranges".

Acked-by: Heiko Carstens <hca@linux.ibm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
arch/s390/boot/Makefile
arch/s390/boot/boot.h
arch/s390/boot/kaslr.c
arch/s390/boot/physmem_info.c [moved from arch/s390/boot/mem_detect.c with 63% similarity]
arch/s390/boot/startup.c
arch/s390/boot/vmem.c
arch/s390/include/asm/mem_detect.h [deleted file]
arch/s390/include/asm/physmem_info.h [new file with mode: 0644]
arch/s390/kernel/setup.c
arch/s390/mm/kasan_init.c
drivers/s390/char/sclp_early_core.c

index cebd4ca16916496e513ba73d47ddac697fca97b8..c7c81e5f9218993f5726bf20228e2594ce2fe40c 100644 (file)
@@ -35,7 +35,7 @@ endif
 
 CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
 
-obj-y  := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o vmem.o
+obj-y  := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o
 obj-y  += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
 obj-y  += version.o pgm_check_info.o ctype.o ipl_data.o machine_kexec_reloc.o
 obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE))  += uv.o
index 58ce701d6110427e39748d6899eb2b179ed591b1..d39895d5796ed273856e621486b0d240ebcb57ed 100644 (file)
@@ -34,7 +34,7 @@ struct vmlinux_info {
 
 void startup_kernel(void);
 unsigned long detect_memory(unsigned long *safe_addr);
-void mem_detect_set_usable_limit(unsigned long limit);
+void physmem_set_usable_limit(unsigned long limit);
 bool is_ipl_block_dump(void);
 void store_ipl_parmblock(void);
 unsigned long read_ipl_report(unsigned long safe_addr);
index 3e3d846400b4e85d5b0edc34cfe422f0736a2d6a..22b7c5d8e94a6641f9c75981d967582f190d6221 100644 (file)
@@ -3,7 +3,7 @@
  * Copyright IBM Corp. 2019
  */
 #include <linux/pgtable.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
 #include <asm/cpacf.h>
 #include <asm/timex.h>
 #include <asm/sclp.h>
@@ -93,7 +93,7 @@ static int get_random(unsigned long limit, unsigned long *value)
 
 /*
  * To randomize kernel base address we have to consider several facts:
- * 1. physical online memory might not be continuous and have holes. mem_detect
+ * 1. physical online memory might not be continuous and have holes. physmem
  *    info contains list of online memory ranges we should consider.
  * 2. we have several memory regions which are occupied and we should not
  *    overlap and destroy them. Currently safe_addr tells us the border below
@@ -108,7 +108,7 @@ static int get_random(unsigned long limit, unsigned long *value)
  *    (16 pages when the kernel is built with kasan enabled)
  * Assumptions:
  * 1. kernel size (including .bss size) and upper memory limit are page aligned.
- * 2. mem_detect memory region start is THREAD_SIZE aligned / end is PAGE_SIZE
+ * 2. physmem online region start is THREAD_SIZE aligned / end is PAGE_SIZE
  *    aligned (in practice memory configurations granularity on z/VM and LPAR
  *    is 1mb).
  *
@@ -132,7 +132,7 @@ static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
        unsigned long start, end, pos = 0;
        int i;
 
-       for_each_mem_detect_usable_block(i, &start, &end) {
+       for_each_physmem_usable_range(i, &start, &end) {
                if (_min >= end)
                        continue;
                if (start >= _max)
@@ -153,7 +153,7 @@ static unsigned long position_to_address(unsigned long pos, unsigned long kernel
        unsigned long start, end;
        int i;
 
-       for_each_mem_detect_usable_block(i, &start, &end) {
+       for_each_physmem_usable_range(i, &start, &end) {
                if (_min >= end)
                        continue;
                if (start >= _max)
@@ -172,8 +172,8 @@ static unsigned long position_to_address(unsigned long pos, unsigned long kernel
 
 unsigned long get_random_base(unsigned long safe_addr)
 {
-       unsigned long usable_total = get_mem_detect_usable_total();
-       unsigned long memory_limit = get_mem_detect_end();
+       unsigned long usable_total = get_physmem_usable_total();
+       unsigned long memory_limit = get_physmem_usable_end();
        unsigned long base_pos, max_pos, kernel_size;
        int i;
 
similarity index 63%
rename from arch/s390/boot/mem_detect.c
rename to arch/s390/boot/physmem_info.c
index 35f4ba11f7fd47ba226c4d8e8ecb126e5a2ba1d2..dc2e4d0abfab04730a8a9a0982f9f2eae5ea841f 100644 (file)
@@ -5,44 +5,44 @@
 #include <asm/processor.h>
 #include <asm/sclp.h>
 #include <asm/sections.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
 #include <asm/sparsemem.h>
 #include "decompressor.h"
 #include "boot.h"
 
-struct mem_detect_info __bootdata(mem_detect);
+struct physmem_info __bootdata(physmem_info);
 
 /* up to 256 storage elements, 1020 subincrements each */
 #define ENTRIES_EXTENDED_MAX                                                  \
-       (256 * (1020 / 2) * sizeof(struct mem_detect_block))
+       (256 * (1020 / 2) * sizeof(struct physmem_range))
 
-static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
+static struct physmem_range *__get_physmem_range_ptr(u32 n)
 {
        if (n < MEM_INLINED_ENTRIES)
-               return &mem_detect.entries[n];
-       return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
+               return &physmem_info.online[n];
+       return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
 }
 
 /*
- * sequential calls to add_mem_detect_block with adjacent memory areas
- * are merged together into single memory block.
+ * sequential calls to add_physmem_online_range with adjacent memory ranges
+ * are merged together into single memory range.
  */
-void add_mem_detect_block(u64 start, u64 end)
+void add_physmem_online_range(u64 start, u64 end)
 {
-       struct mem_detect_block *block;
+       struct physmem_range *range;
 
-       if (mem_detect.count) {
-               block = __get_mem_detect_block_ptr(mem_detect.count - 1);
-               if (block->end == start) {
-                       block->end = end;
+       if (physmem_info.range_count) {
+               range = __get_physmem_range_ptr(physmem_info.range_count - 1);
+               if (range->end == start) {
+                       range->end = end;
                        return;
                }
        }
 
-       block = __get_mem_detect_block_ptr(mem_detect.count);
-       block->start = start;
-       block->end = end;
-       mem_detect.count++;
+       range = __get_physmem_range_ptr(physmem_info.range_count);
+       range->start = start;
+       range->end = end;
+       physmem_info.range_count++;
 }
 
 static int __diag260(unsigned long rx1, unsigned long rx2)
@@ -95,7 +95,7 @@ static int diag260(void)
                return -1;
 
        for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
-               add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
+               add_physmem_online_range(storage_extents[i].start, storage_extents[i].end + 1);
        return 0;
 }
 
@@ -148,44 +148,44 @@ unsigned long detect_memory(unsigned long *safe_addr)
        unsigned long max_physmem_end = 0;
 
        sclp_early_get_memsize(&max_physmem_end);
-       mem_detect.entries_extended = (struct mem_detect_block *)ALIGN(*safe_addr, sizeof(u64));
+       physmem_info.online_extended = (struct physmem_range *)ALIGN(*safe_addr, sizeof(u64));
 
        if (!sclp_early_read_storage_info()) {
-               mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
+               physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
        } else if (!diag260()) {
-               mem_detect.info_source = MEM_DETECT_DIAG260;
-               max_physmem_end = max_physmem_end ?: get_mem_detect_end();
+               physmem_info.info_source = MEM_DETECT_DIAG260;
+               max_physmem_end = max_physmem_end ?: get_physmem_usable_end();
        } else if (max_physmem_end) {
-               add_mem_detect_block(0, max_physmem_end);
-               mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
+               add_physmem_online_range(0, max_physmem_end);
+               physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
        } else {
                max_physmem_end = search_mem_end();
-               add_mem_detect_block(0, max_physmem_end);
-               mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
+               add_physmem_online_range(0, max_physmem_end);
+               physmem_info.info_source = MEM_DETECT_BIN_SEARCH;
        }
 
-       if (mem_detect.count > MEM_INLINED_ENTRIES) {
-               *safe_addr += (mem_detect.count - MEM_INLINED_ENTRIES) *
-                            sizeof(struct mem_detect_block);
+       if (physmem_info.range_count > MEM_INLINED_ENTRIES) {
+               *safe_addr += (physmem_info.range_count - MEM_INLINED_ENTRIES) *
+                             sizeof(struct physmem_range);
        }
 
        return max_physmem_end;
 }
 
-void mem_detect_set_usable_limit(unsigned long limit)
+void physmem_set_usable_limit(unsigned long limit)
 {
-       struct mem_detect_block *block;
+       struct physmem_range *range;
        int i;
 
        /* make sure mem_detect.usable ends up within online memory block */
-       for (i = 0; i < mem_detect.count; i++) {
-               block = __get_mem_detect_block_ptr(i);
-               if (block->start >= limit)
+       for (i = 0; i < physmem_info.range_count; i++) {
+               range = __get_physmem_range_ptr(i);
+               if (range->start >= limit)
                        break;
-               if (block->end >= limit) {
-                       mem_detect.usable = limit;
+               if (range->end >= limit) {
+                       physmem_info.usable = limit;
                        break;
                }
-               mem_detect.usable = block->end;
+               physmem_info.usable = range->end;
        }
 }
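
Editor's note on the merge behaviour implemented above: consecutive calls to add_physmem_online_range() with adjacent ranges extend the last entry instead of appending a new one. A minimal user-space sketch of that logic follows (simplified struct and fixed-size array, not the kernel code):

#include <stdio.h>
#include <stdint.h>

/* simplified stand-ins for the boot code structures (illustrative only) */
struct range { uint64_t start, end; };

static struct range online[16];
static unsigned int range_count;

/* mirrors the merge logic of add_physmem_online_range(): extend the last
 * range when the new one is adjacent, otherwise append a new entry */
static void add_online_range(uint64_t start, uint64_t end)
{
	if (range_count && online[range_count - 1].end == start) {
		online[range_count - 1].end = end;
		return;
	}
	online[range_count].start = start;
	online[range_count].end = end;
	range_count++;
}

int main(void)
{
	add_online_range(0x00000000, 0x10000000);	/* first increment    */
	add_online_range(0x10000000, 0x20000000);	/* adjacent -> merged */
	add_online_range(0x40000000, 0x50000000);	/* gap -> new range   */

	/* prints two ranges: [0, 0x20000000) and [0x40000000, 0x50000000) */
	for (unsigned int i = 0; i < range_count; i++)
		printf("range %u: [%#llx, %#llx)\n", i,
		       (unsigned long long)online[i].start,
		       (unsigned long long)online[i].end);
	return 0;
}
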
index 16ee3469f744662bad47d6ea81d246ea5f6b1ed0..50475bf25ecda0816af0360bdbcd09627384766a 100644 (file)
@@ -12,7 +12,7 @@
 #include <asm/diag.h>
 #include <asm/uv.h>
 #include <asm/abs_lowcore.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
 #include "decompressor.h"
 #include "boot.h"
 #include "uv.h"
@@ -139,7 +139,7 @@ static void handle_relocs(unsigned long offset)
  *
  * Consider the following factors:
  * 1. max_physmem_end - end of physical memory online or standby.
- *    Always <= end of the last online memory block (get_mem_detect_end()).
+ *    Always >= end of the last online memory range (get_physmem_online_end()).
  * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
  *    kernel is able to support.
  * 3. "mem=" kernel command line option which limits physical memory usage.
@@ -303,7 +303,7 @@ void startup_kernel(void)
        setup_ident_map_size(max_physmem_end);
        setup_vmalloc_size();
        asce_limit = setup_kernel_memory_layout();
-       mem_detect_set_usable_limit(ident_map_size);
+       physmem_set_usable_limit(ident_map_size);
 
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
                random_lma = get_random_base(safe_addr);
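
Editor's note: the comment in the hunk above lists the factors that bound the identity mapping size. A schematic reduction of those factors with purely illustrative values is shown below; it is not the in-tree setup_ident_map_size(), which handles additional cases not visible in this diff.

#include <stdio.h>

/* illustrative reduction of the listed factors to a single cap */
static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long max_physmem_end = 0x200000000UL;	/* 8 GiB online/standby (assumed)          */
	unsigned long physmem_bits_cap = 1UL << 46;	/* example CONFIG_MAX_PHYSMEM_BITS = 46,
							 * assumes a 64-bit host                    */
	unsigned long mem_option = 0x100000000UL;	/* "mem=4G" on the command line (assumed)   */
	unsigned long ident_map_size;

	ident_map_size = max_physmem_end;
	if (mem_option)
		ident_map_size = min_ul(ident_map_size, mem_option);
	ident_map_size = min_ul(ident_map_size, physmem_bits_cap);

	printf("ident_map_size = %#lx\n", ident_map_size);	/* 0x100000000 (4 GiB) */
	return 0;
}
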
index 4d1d0d8e99cb2ee25bd90b153181b583e1d1734e..b89a6893f398b059d54ffa8613a96e62ca82d1da 100644 (file)
@@ -4,7 +4,7 @@
 #include <asm/pgalloc.h>
 #include <asm/facility.h>
 #include <asm/sections.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
 #include <asm/maccess.h>
 #include <asm/abs_lowcore.h>
 #include "decompressor.h"
@@ -51,7 +51,7 @@ static void pgtable_populate_init(void)
                pgalloc_low = max(pgalloc_low, initrd_end);
        }
 
-       pgalloc_end = round_down(get_mem_detect_end(), PAGE_SIZE);
+       pgalloc_end = round_down(get_physmem_usable_end(), PAGE_SIZE);
        pgalloc_pos = pgalloc_end;
 
        boot_check_oom();
@@ -252,7 +252,7 @@ void setup_vmem(unsigned long asce_limit)
         */
        pgtable_populate_init();
        pgtable_populate(0, sizeof(struct lowcore), POPULATE_ONE2ONE);
-       for_each_mem_detect_usable_block(i, &start, &end)
+       for_each_physmem_usable_range(i, &start, &end)
                pgtable_populate(start, end, POPULATE_ONE2ONE);
        pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
                         POPULATE_ABS_LOWCORE);
diff --git a/arch/s390/include/asm/mem_detect.h b/arch/s390/include/asm/mem_detect.h
deleted file mode 100644 (file)
index f9e7354..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_S390_MEM_DETECT_H
-#define _ASM_S390_MEM_DETECT_H
-
-#include <linux/types.h>
-
-enum mem_info_source {
-       MEM_DETECT_NONE = 0,
-       MEM_DETECT_SCLP_STOR_INFO,
-       MEM_DETECT_DIAG260,
-       MEM_DETECT_SCLP_READ_INFO,
-       MEM_DETECT_BIN_SEARCH
-};
-
-struct mem_detect_block {
-       u64 start;
-       u64 end;
-};
-
-/*
- * Storage element id is defined as 1 byte (up to 256 storage elements).
- * In practise only storage element id 0 and 1 are used).
- * According to architecture one storage element could have as much as
- * 1020 subincrements. 255 mem_detect_blocks are embedded in mem_detect_info.
- * If more mem_detect_blocks are required, a block of memory from already
- * known mem_detect_block is taken (entries_extended points to it).
- */
-#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
-
-struct mem_detect_info {
-       u32 count;
-       u8 info_source;
-       unsigned long usable;
-       struct mem_detect_block entries[MEM_INLINED_ENTRIES];
-       struct mem_detect_block *entries_extended;
-};
-extern struct mem_detect_info mem_detect;
-
-void add_mem_detect_block(u64 start, u64 end);
-
-static inline int __get_mem_detect_block(u32 n, unsigned long *start,
-                                        unsigned long *end, bool respect_usable_limit)
-{
-       if (n >= mem_detect.count) {
-               *start = 0;
-               *end = 0;
-               return -1;
-       }
-
-       if (n < MEM_INLINED_ENTRIES) {
-               *start = (unsigned long)mem_detect.entries[n].start;
-               *end = (unsigned long)mem_detect.entries[n].end;
-       } else {
-               *start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start;
-               *end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end;
-       }
-
-       if (respect_usable_limit && mem_detect.usable) {
-               if (*start >= mem_detect.usable)
-                       return -1;
-               if (*end > mem_detect.usable)
-                       *end = mem_detect.usable;
-       }
-       return 0;
-}
-
-/**
- * for_each_mem_detect_usable_block - early online memory range iterator
- * @i: an integer used as loop variable
- * @p_start: ptr to unsigned long for start address of the range
- * @p_end: ptr to unsigned long for end address of the range
- *
- * Walks over detected online memory ranges below usable limit.
- */
-#define for_each_mem_detect_usable_block(i, p_start, p_end)            \
-       for (i = 0; !__get_mem_detect_block(i, p_start, p_end, true); i++)
-
-/* Walks over all detected online memory ranges disregarding usable limit. */
-#define for_each_mem_detect_block(i, p_start, p_end)           \
-       for (i = 0; !__get_mem_detect_block(i, p_start, p_end, false); i++)
-
-static inline unsigned long get_mem_detect_usable_total(void)
-{
-       unsigned long start, end, total = 0;
-       int i;
-
-       for_each_mem_detect_usable_block(i, &start, &end)
-               total += end - start;
-
-       return total;
-}
-
-static inline void get_mem_detect_reserved(unsigned long *start,
-                                          unsigned long *size)
-{
-       *start = (unsigned long)mem_detect.entries_extended;
-       if (mem_detect.count > MEM_INLINED_ENTRIES)
-               *size = (mem_detect.count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block);
-       else
-               *size = 0;
-}
-
-static inline unsigned long get_mem_detect_end(void)
-{
-       unsigned long start;
-       unsigned long end;
-
-       if (mem_detect.usable)
-               return mem_detect.usable;
-       if (mem_detect.count) {
-               __get_mem_detect_block(mem_detect.count - 1, &start, &end, false);
-               return end;
-       }
-       return 0;
-}
-
-#endif
diff --git a/arch/s390/include/asm/physmem_info.h b/arch/s390/include/asm/physmem_info.h
new file mode 100644 (file)
index 0000000..d5e65a5
--- /dev/null
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_MEM_DETECT_H
+#define _ASM_S390_MEM_DETECT_H
+
+#include <linux/types.h>
+
+enum physmem_info_source {
+       MEM_DETECT_NONE = 0,
+       MEM_DETECT_SCLP_STOR_INFO,
+       MEM_DETECT_DIAG260,
+       MEM_DETECT_SCLP_READ_INFO,
+       MEM_DETECT_BIN_SEARCH
+};
+
+struct physmem_range {
+       u64 start;
+       u64 end;
+};
+
+/*
+ * Storage element id is defined as 1 byte (up to 256 storage elements).
+ * In practise only storage element id 0 and 1 are used).
+ * According to architecture one storage element could have as much as
+ * 1020 subincrements. 255 physmem_ranges are embedded in physmem_info.
+ * If more physmem_ranges are required, a block of memory from already
+ * known physmem_range is taken (online_extended points to it).
+ */
+#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
+
+struct physmem_info {
+       u32 range_count;
+       u8 info_source;
+       unsigned long usable;
+       struct physmem_range online[MEM_INLINED_ENTRIES];
+       struct physmem_range *online_extended;
+};
+
+extern struct physmem_info physmem_info;
+
+void add_physmem_online_range(u64 start, u64 end);
+
+static inline int __get_physmem_range(u32 n, unsigned long *start,
+                                     unsigned long *end, bool respect_usable_limit)
+{
+       if (n >= physmem_info.range_count) {
+               *start = 0;
+               *end = 0;
+               return -1;
+       }
+
+       if (n < MEM_INLINED_ENTRIES) {
+               *start = (unsigned long)physmem_info.online[n].start;
+               *end = (unsigned long)physmem_info.online[n].end;
+       } else {
+               *start = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].start;
+               *end = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].end;
+       }
+
+       if (respect_usable_limit && physmem_info.usable) {
+               if (*start >= physmem_info.usable)
+                       return -1;
+               if (*end > physmem_info.usable)
+                       *end = physmem_info.usable;
+       }
+       return 0;
+}
+
+/**
+ * for_each_physmem_usable_range - early online memory range iterator
+ * @i: an integer used as loop variable
+ * @p_start: ptr to unsigned long for start address of the range
+ * @p_end: ptr to unsigned long for end address of the range
+ *
+ * Walks over detected online memory ranges below usable limit.
+ */
+#define for_each_physmem_usable_range(i, p_start, p_end)               \
+       for (i = 0; !__get_physmem_range(i, p_start, p_end, true); i++)
+
+/* Walks over all detected online memory ranges disregarding usable limit. */
+#define for_each_physmem_online_range(i, p_start, p_end)               \
+       for (i = 0; !__get_physmem_range(i, p_start, p_end, false); i++)
+
+static inline unsigned long get_physmem_usable_total(void)
+{
+       unsigned long start, end, total = 0;
+       int i;
+
+       for_each_physmem_usable_range(i, &start, &end)
+               total += end - start;
+
+       return total;
+}
+
+static inline void get_physmem_reserved(unsigned long *start, unsigned long *size)
+{
+       *start = (unsigned long)physmem_info.online_extended;
+       if (physmem_info.range_count > MEM_INLINED_ENTRIES)
+               *size = (physmem_info.range_count - MEM_INLINED_ENTRIES) *
+                       sizeof(struct physmem_range);
+       else
+               *size = 0;
+}
+
+static inline unsigned long get_physmem_usable_end(void)
+{
+       unsigned long start;
+       unsigned long end;
+
+       if (physmem_info.usable)
+               return physmem_info.usable;
+       if (physmem_info.range_count) {
+               __get_physmem_range(physmem_info.range_count - 1, &start, &end, false);
+               return end;
+       }
+       return 0;
+}
+
+#endif
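
Editor's note: the new header clamps every walk to physmem_info.usable when it is set. The self-contained mock below (invented ranges, limit, and helper name; a plain array instead of the inline/extended split) reproduces the usable-limit handling of __get_physmem_range() and the walk done by for_each_physmem_usable_range()/get_physmem_usable_total():

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, end; };

static const struct range online[] = {
	{ 0x00000000, 0x20000000 },	/* fully below the limit          */
	{ 0x30000000, 0x50000000 },	/* straddles the limit -> clamped */
	{ 0x60000000, 0x70000000 },	/* above the limit -> skipped     */
};
static const uint64_t usable = 0x40000000;	/* pretend usable limit */

static int get_usable_range(unsigned int n, uint64_t *start, uint64_t *end)
{
	if (n >= sizeof(online) / sizeof(online[0]))
		return -1;
	*start = online[n].start;
	*end = online[n].end;
	if (usable) {
		if (*start >= usable)
			return -1;	/* terminates the iteration      */
		if (*end > usable)
			*end = usable;	/* clamp to the usable limit     */
	}
	return 0;
}

int main(void)
{
	uint64_t start, end, total = 0;

	/* same shape as for_each_physmem_usable_range() */
	for (unsigned int i = 0; !get_usable_range(i, &start, &end); i++) {
		printf("usable range %u: [%#llx, %#llx)\n", i,
		       (unsigned long long)start, (unsigned long long)end);
		total += end - start;
	}
	/* prints 0x30000000: 0x20000000 from the first range plus the
	 * clamped 0x10000000 from the second */
	printf("usable total: %#llx\n", (unsigned long long)total);
	return 0;
}
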
index 8ec5cdf9dadc77d029dc45866f258f80101fc7be..f909a2dc8a5a2ad0a604ff57b460fa38df8c54c0 100644 (file)
@@ -74,7 +74,7 @@
 #include <asm/numa.h>
 #include <asm/alternative.h>
 #include <asm/nospec-branch.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
 #include <asm/maccess.h>
 #include <asm/uv.h>
 #include <asm/asm-offsets.h>
@@ -147,7 +147,7 @@ static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
 
 int __bootdata(noexec_disabled);
 unsigned long __bootdata(ident_map_size);
-struct mem_detect_info __bootdata(mem_detect);
+struct physmem_info __bootdata(physmem_info);
 struct initrd_data __bootdata(initrd_data);
 unsigned long __bootdata(pgalloc_pos);
 unsigned long __bootdata(pgalloc_end);
@@ -730,27 +730,27 @@ static void __init reserve_certificate_list(void)
                memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
 }
 
-static void __init reserve_mem_detect_info(void)
+static void __init reserve_physmem_info(void)
 {
        unsigned long start, size;
 
-       get_mem_detect_reserved(&start, &size);
+       get_physmem_reserved(&start, &size);
        if (size)
                memblock_reserve(start, size);
 }
 
-static void __init free_mem_detect_info(void)
+static void __init free_physmem_info(void)
 {
        unsigned long start, size;
 
-       get_mem_detect_reserved(&start, &size);
+       get_physmem_reserved(&start, &size);
        if (size)
                memblock_phys_free(start, size);
 }
 
 static const char * __init get_mem_info_source(void)
 {
-       switch (mem_detect.info_source) {
+       switch (physmem_info.info_source) {
        case MEM_DETECT_SCLP_STOR_INFO:
                return "sclp storage info";
        case MEM_DETECT_DIAG260:
@@ -763,18 +763,18 @@ static const char * __init get_mem_info_source(void)
        return "none";
 }
 
-static void __init memblock_add_mem_detect_info(void)
+static void __init memblock_add_physmem_info(void)
 {
        unsigned long start, end;
        int i;
 
        pr_debug("physmem info source: %s (%hhd)\n",
-                get_mem_info_source(), mem_detect.info_source);
+                get_mem_info_source(), physmem_info.info_source);
        /* keep memblock lists close to the kernel */
        memblock_set_bottom_up(true);
-       for_each_mem_detect_usable_block(i, &start, &end)
+       for_each_physmem_usable_range(i, &start, &end)
                memblock_add(start, end - start);
-       for_each_mem_detect_block(i, &start, &end)
+       for_each_physmem_online_range(i, &start, &end)
                memblock_physmem_add(start, end - start);
        memblock_set_bottom_up(false);
        memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
@@ -997,14 +997,14 @@ void __init setup_arch(char **cmdline_p)
        reserve_kernel();
        reserve_initrd();
        reserve_certificate_list();
-       reserve_mem_detect_info();
+       reserve_physmem_info();
        memblock_set_current_limit(ident_map_size);
        memblock_allow_resize();
 
        /* Get information about *all* installed memory */
-       memblock_add_mem_detect_info();
+       memblock_add_physmem_info();
 
-       free_mem_detect_info();
+       free_physmem_info();
        setup_memory_end();
        memblock_dump_all();
        setup_memory();
index ef89a5f26853de96d238ed524750987170f1f5f5..b0658136264ff3ae0f6b60ef7d069f5413f94c96 100644 (file)
@@ -4,7 +4,7 @@
 #include <linux/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/kasan.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
 #include <asm/processor.h>
 #include <asm/sclp.h>
 #include <asm/facility.h>
@@ -244,7 +244,7 @@ void __init kasan_early_init(void)
        memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
 
        if (has_edat) {
-               shadow_alloc_size = get_mem_detect_usable_total() >> KASAN_SHADOW_SCALE_SHIFT;
+               shadow_alloc_size = get_physmem_usable_total() >> KASAN_SHADOW_SCALE_SHIFT;
                segment_pos = round_down(pgalloc_pos, _SEGMENT_SIZE);
                segment_low = segment_pos - shadow_alloc_size;
                segment_low = round_down(segment_low, _SEGMENT_SIZE);
@@ -282,7 +282,7 @@ void __init kasan_early_init(void)
         * +- shadow end ----+---------+- shadow end ---+
         */
        /* populate kasan shadow (for identity mapping and zero page mapping) */
-       for_each_mem_detect_usable_block(i, &start, &end)
+       for_each_physmem_usable_range(i, &start, &end)
                kasan_early_pgtable_populate(__sha(start), __sha(end), POPULATE_MAP);
        if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
                untracked_end = VMALLOC_START;
index ac1d00980fa669963f380c87a70ced40c19fd16f..dbd5c53d8edfe1bf406de6dd9ab56d9d02005567 100644 (file)
@@ -10,7 +10,7 @@
 #include <asm/ebcdic.h>
 #include <asm/irq.h>
 #include <asm/sections.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
 #include <asm/facility.h>
 #include "sclp.h"
 #include "sclp_rw.h"
@@ -336,7 +336,7 @@ int __init sclp_early_get_hsa_size(unsigned long *hsa_size)
 
 #define SCLP_STORAGE_INFO_FACILITY     0x0000400000000000UL
 
-void __weak __init add_mem_detect_block(u64 start, u64 end) {}
+void __weak __init add_physmem_online_range(u64 start, u64 end) {}
 int __init sclp_early_read_storage_info(void)
 {
        struct read_storage_sccb *sccb = (struct read_storage_sccb *)sclp_early_sccb;
@@ -369,7 +369,7 @@ int __init sclp_early_read_storage_info(void)
                                if (!sccb->entries[sn])
                                        continue;
                                rn = sccb->entries[sn] >> 16;
-                               add_mem_detect_block((rn - 1) * rzm, rn * rzm);
+                               add_physmem_online_range((rn - 1) * rzm, rn * rzm);
                        }
                        break;
                case 0x0310:
@@ -382,6 +382,6 @@ int __init sclp_early_read_storage_info(void)
 
        return 0;
 fail:
-       mem_detect.count = 0;
+       physmem_info.range_count = 0;
        return -EIO;
 }
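
Editor's note on the arithmetic in the last hunk: each storage increment number rn reported in the SCCB is converted into a byte range using the increment size rzm. A toy calculation with an assumed increment size (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned long rzm = 0x10000000;	/* assumed 256 MiB increment size */
	unsigned long rn = 3;		/* third storage increment        */

	/* same conversion as add_physmem_online_range((rn - 1) * rzm, rn * rzm) */
	printf("increment %lu -> [%#lx, %#lx)\n", rn, (rn - 1) * rzm, rn * rzm);
	return 0;	/* increment 3 -> [0x20000000, 0x30000000) */
}
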