// SPDX-License-Identifier: GPL-2.0
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/dmar.h>
#include <linux/irq.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include <asm/x2apic.h>

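/*
 * Per-CPU state: the cached logical APIC ID (the LDR value, read at APIC
 * setup), the set of online CPUs sharing this CPU's x2apic cluster, and a
 * scratch mask used while composing per-cluster IPIs.
 */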
static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
static DEFINE_PER_CPU(cpumask_var_t, cpus_in_cluster);
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);

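/* Claim this driver during the ACPI MADT OEM check iff x2apic is enabled. */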
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        return x2apic_enabled();
}

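/*
 * In x2apic cluster mode the logical ID is split: the upper 16 bits hold
 * the cluster id, the lower 16 bits a one-bit-per-CPU mask within the
 * cluster. E.g. logical ID 0x00030001 is the first CPU of cluster 3.
 */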
static inline u32 x2apic_cluster(int cpu)
{
        return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16;
}

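/* Send a single IPI, using the target CPU's cached logical ID as destination. */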
static void x2apic_send_IPI(int cpu, int vector)
{
        u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);

        x2apic_wrmsr_fence();
        __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}

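/*
 * Deliver @vector to all CPUs in @mask with at most one ICR write per
 * cluster: logical IDs within one cluster can be ORed into a single
 * multi-CPU destination, e.g. 0x00010001 | 0x00010002 == 0x00010003
 * reaches both CPUs of cluster 1 in one write.
 */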
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
        struct cpumask *cpus_in_cluster_ptr;
        struct cpumask *ipi_mask_ptr;
        unsigned int cpu, this_cpu;
        unsigned long flags;
        u32 dest;

        x2apic_wrmsr_fence();

        local_irq_save(flags);

        this_cpu = smp_processor_id();

        /*
         * We are going to modify the mask, so we need our own copy,
         * and it must be manipulated with interrupts disabled.
         */
        ipi_mask_ptr = this_cpu_cpumask_var_ptr(ipi_mask);
        cpumask_copy(ipi_mask_ptr, mask);

        /*
         * The idea is to send one IPI per cluster.
         */
        for_each_cpu(cpu, ipi_mask_ptr) {
                unsigned long i;

                cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu);
                dest = 0;

                /* Collect cpus in cluster. */
                for_each_cpu_and(i, ipi_mask_ptr, cpus_in_cluster_ptr) {
                        if (apic_dest == APIC_DEST_ALLINC || i != this_cpu)
                                dest |= per_cpu(x86_cpu_to_logical_apicid, i);
                }

                if (!dest)
                        continue;

                __x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
                /*
                 * Discard the cluster sibling CPUs now so that we do
                 * not send them an IPI a second time.
                 */
                cpumask_andnot(ipi_mask_ptr, ipi_mask_ptr, cpus_in_cluster_ptr);
        }

        local_irq_restore(flags);
}

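/* Wrappers choosing whether the sending CPU itself is a valid destination. */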
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
        __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
        __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
        __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
        __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}

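/*
 * Compute the logical destination for an interrupt affinity @mask: take
 * the cluster of the first CPU in @mask and OR in every mask member of
 * that same cluster. CPUs in other clusters are dropped from the
 * effective affinity mask.
 */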
static int
x2apic_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqdata,
                          unsigned int *apicid)
{
        struct cpumask *effmsk = irq_data_get_effective_affinity_mask(irqdata);
        unsigned int cpu;
        u32 dest = 0;
        u16 cluster;

        cpu = cpumask_first(mask);
        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
        cluster = x2apic_cluster(cpu);

        cpumask_clear(effmsk);
        for_each_cpu(cpu, mask) {
                if (cluster != x2apic_cluster(cpu))
                        continue;
                dest |= per_cpu(x86_cpu_to_logical_apicid, cpu);
                cpumask_set_cpu(cpu, effmsk);
        }

        *apicid = dest;
        return 0;
}

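/*
 * Cache this CPU's LDR (read-only in x2apic mode) and cross-link the
 * cluster sibling masks between this CPU and every online CPU that
 * shares its cluster.
 */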
static void init_x2apic_ldr(void)
{
        unsigned int this_cpu = smp_processor_id();
        unsigned int cpu;

        per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);

        cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
        for_each_online_cpu(cpu) {
                if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
                        continue;
                cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
                cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
        }
}

/*
 * At CPU state changes, update the x2apic cluster sibling info.
 */
static int x2apic_prepare_cpu(unsigned int cpu)
{
        if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL))
                return -ENOMEM;

        if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) {
                free_cpumask_var(per_cpu(cpus_in_cluster, cpu));
                return -ENOMEM;
        }

        return 0;
}

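/* On CPU teardown: drop this CPU from all sibling masks and free its masks. */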
static int x2apic_dead_cpu(unsigned int this_cpu)
{
        int cpu;

        for_each_online_cpu(cpu) {
                if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
                        continue;
                cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
                cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
        }
        free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
        free_cpumask_var(per_cpu(ipi_mask, this_cpu));
        return 0;
}

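/*
 * Driver probe: register the hotplug callbacks that manage the per-CPU
 * masks and seed the boot CPU's own cluster mask. Returns 1 when this
 * driver is usable, i.e. when x2apic mode is active.
 */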
static int x2apic_cluster_probe(void)
{
        int cpu = smp_processor_id();
        int ret;

        if (!x2apic_mode)
                return 0;

        ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
                                x2apic_prepare_cpu, x2apic_dead_cpu);
        if (ret < 0) {
                pr_err("Failed to register X2APIC_PREPARE\n");
                return 0;
        }
        cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
        return 1;
}

static const struct cpumask *x2apic_cluster_target_cpus(void)
{
        return cpu_all_mask;
}

/*
 * Each x2apic cluster is an allocation domain.
 */
static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
                                             const struct cpumask *mask)
{
        /*
         * To minimize vector pressure, the default case (boot, device
         * bringup, etc.) will use a single CPU as the interrupt
         * destination.
         *
         * On explicit migration requests coming from irqbalance etc.,
         * interrupts will be routed to the x2apic cluster members
         * specified in the mask (the cluster id is derived from the
         * first CPU in the mask).
         */
        if (mask == x2apic_cluster_target_cpus())
                cpumask_copy(retmask, cpumask_of(cpu));
        else
                cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
}

static struct apic apic_x2apic_cluster __ro_after_init = {

        .name                           = "cluster x2apic",
        .probe                          = x2apic_cluster_probe,
        .acpi_madt_oem_check            = x2apic_acpi_madt_oem_check,
        .apic_id_valid                  = x2apic_apic_id_valid,
        .apic_id_registered             = x2apic_apic_id_registered,

        .irq_delivery_mode              = dest_LowestPrio,
        .irq_dest_mode                  = 1, /* logical */

        .target_cpus                    = x2apic_cluster_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = APIC_DEST_LOGICAL,
        .check_apicid_used              = NULL,

        .vector_allocation_domain       = cluster_vector_allocation_domain,
        .init_apic_ldr                  = init_x2apic_ldr,

        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .phys_pkg_id                    = x2apic_phys_pkg_id,

        .get_apic_id                    = x2apic_get_apic_id,
        .set_apic_id                    = x2apic_set_apic_id,

        .cpu_mask_to_apicid             = x2apic_cpu_mask_to_apicid,

        .send_IPI                       = x2apic_send_IPI,
        .send_IPI_mask                  = x2apic_send_IPI_mask,
        .send_IPI_mask_allbutself       = x2apic_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = x2apic_send_IPI_allbutself,
        .send_IPI_all                   = x2apic_send_IPI_all,
        .send_IPI_self                  = x2apic_send_IPI_self,

        .inquire_remote_apic            = NULL,

        .read                           = native_apic_msr_read,
        .write                          = native_apic_msr_write,
        .eoi_write                      = native_apic_msr_eoi_write,
        .icr_read                       = native_x2apic_icr_read,
        .icr_write                      = native_x2apic_icr_write,
        .wait_icr_idle                  = native_x2apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_cluster);