// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */
11 #include <linux/cpu.h>
12 #include <linux/interrupt.h>
13 #include <linux/module.h>
14 #include <linux/profile.h>
15 #include <linux/smp.h>
16 #include <linux/sched.h>
17 #include <linux/seq_file.h>
18 #include <linux/delay.h>
20 #include <asm/clint.h>
22 #include <asm/tlbflush.h>
23 #include <asm/cacheflush.h>
25 enum ipi_message_type
{
32 unsigned long __cpuid_to_hartid_map
[NR_CPUS
] = {
33 [0 ... NR_CPUS
-1] = INVALID_HARTID
36 void __init
smp_setup_processor_id(void)
38 cpuid_to_hartid_map(0) = boot_cpu_hartid
;
41 /* A collection of single bit ipi messages. */
43 unsigned long stats
[IPI_MAX
] ____cacheline_aligned
;
44 unsigned long bits ____cacheline_aligned
;
45 } ipi_data
[NR_CPUS
] __cacheline_aligned
;
47 int riscv_hartid_to_cpuid(int hartid
)
51 for (i
= 0; i
< NR_CPUS
; i
++)
52 if (cpuid_to_hartid_map(i
) == hartid
)
55 pr_err("Couldn't find cpu id for hartid [%d]\n", hartid
);
59 void riscv_cpuid_to_hartid_mask(const struct cpumask
*in
, struct cpumask
*out
)
65 cpumask_set_cpu(cpuid_to_hartid_map(cpu
), out
);
67 EXPORT_SYMBOL_GPL(riscv_cpuid_to_hartid_mask
);
69 bool arch_match_cpu_phys_id(int cpu
, u64 phys_id
)
71 return phys_id
== cpuid_to_hartid_map(cpu
);
/* Changing the profiling-timer frequency is not supported on RISC-V. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
80 static void ipi_stop(void)
82 set_cpu_online(smp_processor_id(), false);
87 static void send_ipi_mask(const struct cpumask
*mask
, enum ipi_message_type op
)
89 struct cpumask hartid_mask
;
92 smp_mb__before_atomic();
93 for_each_cpu(cpu
, mask
)
94 set_bit(op
, &ipi_data
[cpu
].bits
);
95 smp_mb__after_atomic();
97 riscv_cpuid_to_hartid_mask(mask
, &hartid_mask
);
98 if (IS_ENABLED(CONFIG_RISCV_SBI
))
99 sbi_send_ipi(cpumask_bits(&hartid_mask
));
101 clint_send_ipi_mask(mask
);
104 static void send_ipi_single(int cpu
, enum ipi_message_type op
)
106 int hartid
= cpuid_to_hartid_map(cpu
);
108 smp_mb__before_atomic();
109 set_bit(op
, &ipi_data
[cpu
].bits
);
110 smp_mb__after_atomic();
112 if (IS_ENABLED(CONFIG_RISCV_SBI
))
113 sbi_send_ipi(cpumask_bits(cpumask_of(hartid
)));
115 clint_send_ipi_single(hartid
);
118 static inline void clear_ipi(void)
120 if (IS_ENABLED(CONFIG_RISCV_SBI
))
121 csr_clear(CSR_IP
, IE_SIE
);
123 clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
126 void riscv_software_interrupt(void)
128 unsigned long *pending_ipis
= &ipi_data
[smp_processor_id()].bits
;
129 unsigned long *stats
= ipi_data
[smp_processor_id()].stats
;
136 /* Order bit clearing and data access. */
139 ops
= xchg(pending_ipis
, 0);
143 if (ops
& (1 << IPI_RESCHEDULE
)) {
144 stats
[IPI_RESCHEDULE
]++;
148 if (ops
& (1 << IPI_CALL_FUNC
)) {
149 stats
[IPI_CALL_FUNC
]++;
150 generic_smp_call_function_interrupt();
153 if (ops
& (1 << IPI_CPU_STOP
)) {
154 stats
[IPI_CPU_STOP
]++;
158 BUG_ON((ops
>> IPI_MAX
) != 0);
160 /* Order data access and bit testing. */
165 static const char * const ipi_names
[] = {
166 [IPI_RESCHEDULE
] = "Rescheduling interrupts",
167 [IPI_CALL_FUNC
] = "Function call interrupts",
168 [IPI_CPU_STOP
] = "CPU stop interrupts",
171 void show_ipi_stats(struct seq_file
*p
, int prec
)
175 for (i
= 0; i
< IPI_MAX
; i
++) {
176 seq_printf(p
, "%*s%u:%s", prec
- 1, "IPI", i
,
177 prec
>= 4 ? " " : "");
178 for_each_online_cpu(cpu
)
179 seq_printf(p
, "%10lu ", ipi_data
[cpu
].stats
[i
]);
180 seq_printf(p
, " %s\n", ipi_names
[i
]);
184 void arch_send_call_function_ipi_mask(struct cpumask
*mask
)
186 send_ipi_mask(mask
, IPI_CALL_FUNC
);
189 void arch_send_call_function_single_ipi(int cpu
)
191 send_ipi_single(cpu
, IPI_CALL_FUNC
);
194 void smp_send_stop(void)
196 unsigned long timeout
;
198 if (num_online_cpus() > 1) {
201 cpumask_copy(&mask
, cpu_online_mask
);
202 cpumask_clear_cpu(smp_processor_id(), &mask
);
204 if (system_state
<= SYSTEM_RUNNING
)
205 pr_crit("SMP: stopping secondary CPUs\n");
206 send_ipi_mask(&mask
, IPI_CPU_STOP
);
209 /* Wait up to one second for other CPUs to stop */
210 timeout
= USEC_PER_SEC
;
211 while (num_online_cpus() > 1 && timeout
--)
214 if (num_online_cpus() > 1)
215 pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
216 cpumask_pr_args(cpu_online_mask
));
219 void smp_send_reschedule(int cpu
)
221 send_ipi_single(cpu
, IPI_RESCHEDULE
);
223 EXPORT_SYMBOL_GPL(smp_send_reschedule
);