arch/s390/numa/numa.c
// SPDX-License-Identifier: GPL-2.0
/*
 * NUMA support for s390
 *
 * Implement NUMA core code.
 *
 * Copyright IBM Corp. 2015
 */

#define KMSG_COMPONENT "numa"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/cpumask.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/node.h>

#include <asm/numa.h>
#include "numa_mode.h"

pg_data_t *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);

cpumask_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

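/*
 * The "plain" mode is the default: it makes only node 0 possible, so all
 * memory and CPUs end up in a single node (effectively no NUMA).
 */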
static void plain_setup(void)
{
        node_set(0, node_possible_map);
}

const struct numa_mode numa_mode_plain = {
        .name = "plain",
        .setup = plain_setup,
};

static const struct numa_mode *mode = &numa_mode_plain;

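/*
 * numa_pfn_to_nid() - Map a page frame number to a node id
 *
 * Falls back to node 0 if the active mode does not provide a mapping.
 */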
int numa_pfn_to_nid(unsigned long pfn)
{
        return mode->__pfn_to_nid ? mode->__pfn_to_nid(pfn) : 0;
}

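/*
 * numa_update_cpu_topology() - Let the active mode rebuild the CPU to node
 * mapping, if it implements a topology update.
 */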
void numa_update_cpu_topology(void)
{
        if (mode->update_cpu_topology)
                mode->update_cpu_topology();
}

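/*
 * __node_distance() - Distance between two nodes
 *
 * Returns the mode specific distance, or 0 if the mode defines none.
 */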
int __node_distance(int a, int b)
{
        return mode->distance ? mode->distance(a, b) : 0;
}

int numa_debug_enabled;

/*
 * alloc_node_data() - Allocate node data
 */
static __init pg_data_t *alloc_node_data(void)
{
        pg_data_t *res;

        res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 8);
        memset(res, 0, sizeof(pg_data_t));
        return res;
}

/*
 * numa_setup_memory() - Assign bootmem to nodes
 *
 * The memory is first added to memblock without any respect to nodes.
 * This is fixed up before the remaining memblock memory is handed over to
 * the buddy allocator.
 * An important side effect is that large bootmem allocations may easily
 * cross node boundaries, which can be necessary for large allocations when
 * the memory stripes of the individual nodes are small (i.e. when using
 * NUMA emulation).
 *
 * Memory defines nodes:
 * Therefore this routine also sets online all nodes that contain memory.
 */
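/*
 * Illustrative example (assuming a NUMA emulation setup): if mode->align()
 * returns a 1 GB stripe size, the loop below walks memory in 1 GB steps and
 * assigns each stripe to the node that numa_pfn_to_nid() reports for its
 * first page frame. In plain mode no align() is set, so ULONG_MAX is used
 * and all of memory goes to node 0 in a single step.
 */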
static void __init numa_setup_memory(void)
{
        unsigned long cur_base, align, end_of_dram;
        int nid = 0;

        end_of_dram = memblock_end_of_DRAM();
        align = mode->align ? mode->align() : ULONG_MAX;

        /*
         * Step through all available memory and assign it to the nodes
         * indicated by the mode implementation.
         * All nodes which are seen here will be set online.
         */
        cur_base = 0;
        do {
                nid = numa_pfn_to_nid(PFN_DOWN(cur_base));
                node_set_online(nid);
                memblock_set_node(cur_base, align, &memblock.memory, nid);
                cur_base += align;
        } while (cur_base < end_of_dram);

        /* Allocate and fill out node_data */
        for (nid = 0; nid < MAX_NUMNODES; nid++)
                NODE_DATA(nid) = alloc_node_data();

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;
                unsigned long t_start, t_end;
                int i;

                start_pfn = ULONG_MAX;
                end_pfn = 0;
                for_each_mem_pfn_range(i, nid, &t_start, &t_end, NULL) {
                        if (t_start < start_pfn)
                                start_pfn = t_start;
                        if (t_end > end_pfn)
                                end_pfn = t_end;
                }
                NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
                NODE_DATA(nid)->node_id = nid;
        }
}

/*
 * numa_setup() - Earliest initialization
 *
 * Assign the mode and call the mode's setup routine.
 */
void __init numa_setup(void)
{
        pr_info("NUMA mode: %s\n", mode->name);
        nodes_clear(node_possible_map);
        if (mode->setup)
                mode->setup();
        numa_setup_memory();
        memblock_dump_all();
}

/*
 * numa_init_early() - Initialization initcall
 *
 * This runs when only one CPU is online and before the first
 * topology update is called for by the scheduler.
 */
static int __init numa_init_early(void)
{
        /* Attach all possible CPUs to node 0 for now. */
        cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
        return 0;
}
early_initcall(numa_init_early);

/*
 * numa_init_late() - Initialization initcall
 *
 * Register NUMA nodes.
 */
static int __init numa_init_late(void)
{
        int nid;

        for_each_online_node(nid)
                register_one_node(nid);
        return 0;
}
arch_initcall(numa_init_late);

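/*
 * The "numa_debug" early parameter enables additional debug output from the
 * NUMA code; the handler only sets numa_debug_enabled, which is checked by
 * the mode implementations.
 */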
static int __init parse_debug(char *parm)
{
        numa_debug_enabled = 1;
        return 0;
}
early_param("numa_debug", parse_debug);

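/*
 * The "numa=" early parameter selects the NUMA mode by name, e.g.
 * "numa=plain" (the default) or, with CONFIG_NUMA_EMU, whatever name
 * numa_mode_emu defines (typically "numa=emu").
 */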
static int __init parse_numa(char *parm)
{
        if (strcmp(parm, numa_mode_plain.name) == 0)
                mode = &numa_mode_plain;
#ifdef CONFIG_NUMA_EMU
        if (strcmp(parm, numa_mode_emu.name) == 0)
                mode = &numa_mode_emu;
#endif
        return 0;
}
early_param("numa", parse_numa);