return IRQ_HANDLED;
}
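+/*
+ * Generic fallback event handler; plda_init_interrupts() requests it when
+ * the platform does not supply a request_event_irq() callback.
+ */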
+static irqreturn_t plda_event_handler(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
static void plda_handle_event(struct irq_desc *desc)
{
struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
return 0;
}
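+/*
+ * Microchip request_event_irq() callback: hook each event IRQ up to the
+ * existing mc_event_handler() with its per-event name.
+ */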
+static int mc_request_event_irq(struct plda_pcie_rp *plda, int event_irq,
+ int event)
+{
+ return devm_request_irq(plda->dev, event_irq, mc_event_handler,
+ 0, event_cause[event].sym, plda);
+}
+
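+/* Event callbacks handed to plda_init_interrupts() by the Microchip host driver. */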
+static const struct plda_event mc_event = {
+ .request_event_irq = mc_request_event_irq,
+};
+
static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
{
struct device *dev = port->dev;
writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST);
}
-static int plda_init_interrupts(struct platform_device *pdev, struct plda_pcie_rp *port)
+static int plda_init_interrupts(struct platform_device *pdev,
+ struct plda_pcie_rp *port,
+ const struct plda_event *event)
{
struct device *dev = &pdev->dev;
int irq;
return -ENXIO;
}
- ret = devm_request_irq(dev, event_irq, mc_event_handler,
- 0, event_cause[i].sym, port);
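+ /* Prefer the platform-specific hook; otherwise fall back to the generic stub handler. */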
+ if (event->request_event_irq)
+ ret = event->request_event_irq(port, event_irq, i);
+ else
+ ret = devm_request_irq(dev, event_irq,
+ plda_event_handler,
+ 0, NULL, port);
+
if (ret) {
dev_err(dev, "failed to request IRQ %d\n", event_irq);
return ret;
return ret;
/* Address translation is up; safe to enable interrupts */
- ret = plda_init_interrupts(pdev, &port->plda);
+ ret = plda_init_interrupts(pdev, &port->plda, &mc_event);
if (ret)
return ret;