When injecting IPIs to a set of harts, the IMSIC IPI support does a
separate MMIO write to the SETIPNUM_LE register of each target hart.
On a platform where the IMSIC is trap-n-emulated, injecting an IPI to
N target harts therefore causes N MMIO traps, so IMSIC IPIs are slow
on such platforms compared to the SBI IPI extension.
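
For illustration, the per-hart write pattern is roughly the sketch
below, where imsic_msi_addr() is a hypothetical stand-in for the
driver's per-CPU MSI address lookup:

  /*
   * Sketch: one MMIO store per target hart. On a trap-n-emulated
   * IMSIC every writel() below traps, so sending an IPI to N harts
   * costs N MMIO traps.
   */
  static void imsic_ipi_send_mask(const struct cpumask *mask)
  {
          unsigned int cpu;

          for_each_cpu(cpu, mask) {
                  void __iomem *setipnum_le = imsic_msi_addr(cpu); /* hypothetical */

                  writel(IMSIC_IPI_ID, setipnum_le); /* one trap per hart */
          }
  }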
Unfortunately, there is no DT, ACPI, or other way of discovering
whether the underlying IMSIC is trap-n-emulated. Using MMIO writes to
the SETIPNUM_LE register for injecting IPIs is purely a software choice
in the IMSIC driver, so add a kernel parameter that lets users disable
IMSIC IPIs on platforms with a trap-n-emulated IMSIC.
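
For example, booting with

  irqchip.riscv_imsic_noipi

on the kernel command line leaves IMSIC_IPI_ID unreserved and IPIs
fall back to the SBI IPI extension.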
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20250716123745.557585-1-apatel@ventanamicro.com
requires the kernel to be built with
CONFIG_ARM64_PSEUDO_NMI.
+ irqchip.riscv_imsic_noipi
+ [RISC-V,EARLY]
+ Force the kernel to not use IMSIC software-injected MSIs
+ as IPIs. Intended for systems where the IMSIC is
+ trap-n-emulated, to reduce MMIO traps when triggering
+ IPIs to multiple harts.
+
irqfixup [HW]
When an interrupt is not handled search all handlers
for it. Intended to get systems with badly broken
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
+#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include "irq-riscv-imsic-state.h"
static int imsic_parent_irq;
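+/* Set via the irqchip.riscv_imsic_noipi kernel parameter */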
+bool imsic_noipi __ro_after_init;
+
+static int __init imsic_noipi_cfg(char *buf)
+{
+ imsic_noipi = true;
+ return 0;
+}
+early_param("irqchip.riscv_imsic_noipi", imsic_noipi_cfg);
#ifdef CONFIG_SMP
static void imsic_ipi_send(unsigned int cpu)
static void imsic_ipi_starting_cpu(void)
{
+ if (imsic_noipi)
+ return;
+
/* Enable IPIs for current CPU. */
__imsic_id_set_enable(IMSIC_IPI_ID);
}
static void imsic_ipi_dying_cpu(void)
{
+ if (imsic_noipi)
+ return;
+
/* Disable IPIs for current CPU. */
__imsic_id_clear_enable(IMSIC_IPI_ID);
}
{
int virq;
+ if (imsic_noipi)
+ return 0;
+
/* Create IMSIC IPI multiplexing */
virq = ipi_mux_create(IMSIC_NR_IPI, imsic_ipi_send);
if (virq <= 0)
while ((local_id = csr_swap(CSR_TOPEI, 0))) {
local_id >>= TOPEI_ID_SHIFT;
- if (local_id == IMSIC_IPI_ID) {
+ if (!imsic_noipi && local_id == IMSIC_IPI_ID) {
if (IS_ENABLED(CONFIG_SMP))
ipi_mux_process();
continue;
lockdep_assert_held(&lpriv->lock);
for_each_set_bit(i, lpriv->dirty_bitmap, imsic->global.nr_ids + 1) {
- if (!i || i == IMSIC_IPI_ID)
+ if (!i || (!imsic_noipi && i == IMSIC_IPI_ID))
goto skip;
vec = &lpriv->vectors[i];
seq_printf(m, "%*starget_cpu : %5u\n", ind, "", vec->cpu);
seq_printf(m, "%*starget_local_id : %5u\n", ind, "", vec->local_id);
seq_printf(m, "%*sis_reserved : %5u\n", ind, "",
- (vec->local_id <= IMSIC_IPI_ID) ? 1 : 0);
+ (!imsic_noipi && vec->local_id <= IMSIC_IPI_ID) ? 1 : 0);
seq_printf(m, "%*sis_enabled : %5u\n", ind, "", is_enabled ? 1 : 0);
seq_printf(m, "%*sis_move_pending : %5u\n", ind, "", mvec ? 1 : 0);
if (mvec) {
irq_matrix_assign_system(imsic->matrix, 0, false);
/* Reserve IPI ID because it is special and used internally */
- irq_matrix_assign_system(imsic->matrix, IMSIC_IPI_ID, false);
+ if (!imsic_noipi)
+ irq_matrix_assign_system(imsic->matrix, IMSIC_IPI_ID, false);
return 0;
}
struct irq_domain *base_domain;
};
+extern bool imsic_noipi;
extern struct imsic_priv *imsic;
void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, bool val);