struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ /*
+ * Tear down the dedicated outbound window used for MSI
+ * generation. This avoids leaking an iATU window across
+ * endpoint stop/start cycles.
+ */
+ if (ep->msi_iatu_mapped) {
+ dw_pcie_ep_unmap_addr(epc, 0, 0, ep->msi_mem_phys);
+ ep->msi_iatu_mapped = false;
+ }
+
dw_pcie_stop_link(pci);
}
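The hunks above and below rely on three pieces of per-endpoint state that are not declared in this excerpt; they would have to be added on the header side. A minimal sketch of the assumed declarations in struct dw_pcie_ep (names and types inferred from how the fields are used in this patch, so illustrative rather than the actual header hunk):

	bool		msi_iatu_mapped;	/* outbound MSI window currently programmed */
	u64		msi_msg_addr;		/* aligned MSI target address the window maps */
	size_t		msi_map_size;		/* size of the mapped window */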
msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;
msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
- ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
- map_size);
- if (ret)
- return ret;
- writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset);
+	/*
+	 * Program the outbound iATU once and keep it enabled.
+	 *
+	 * The spec warns against updating iATU registers while operations
+	 * are still in flight on the AXI bridge interface, so avoid
+	 * reprogramming the region for every MSI, and in particular avoid
+	 * unmapping it right after the writel(), while that write may still
+	 * be in flight.
+	 */
+ if (!ep->msi_iatu_mapped) {
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0,
+ ep->msi_mem_phys, msg_addr,
+ map_size);
+ if (ret)
+ return ret;
+
+ ep->msi_iatu_mapped = true;
+ ep->msi_msg_addr = msg_addr;
+ ep->msi_map_size = map_size;
+ } else if (WARN_ON_ONCE(ep->msi_msg_addr != msg_addr ||
+ ep->msi_map_size != map_size)) {
+		/*
+		 * The host moved the MSI target address, or the required
+		 * mapping size changed, since the window was programmed.
+		 * Reprogramming the iATU at runtime is unsafe on this
+		 * controller, so bail out rather than try to update the
+		 * existing region.
+		 */
+ return -EINVAL;
+ }
- dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
+ writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset);
return 0;
}
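For context on the writel() above: the MSI write lands at 'offset' into ep->msi_mem because dw_pcie_ep_align_addr() hands back the window-aligned target address together with the residual offset and the mapping size needed to cover the write. A rough, self-contained sketch of that relationship, assuming a single power-of-two alignment value (the real helper derives the alignment from the controller's iATU and outbound window constraints):

#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative only: how the aligned address, map_size and offset used
 * above relate. 'align' must be a power of two; dw_pcie_ep_align_addr()
 * obtains the real alignment from the controller configuration.
 */
static uint64_t msi_align_sketch(uint64_t msg_addr, size_t align,
				 size_t *map_size, size_t *offset)
{
	uint64_t aligned = msg_addr & ~((uint64_t)align - 1);

	*offset = msg_addr - aligned;
	/* The window must at least cover the 32-bit MSI write at 'offset'. */
	*map_size = (*offset + sizeof(uint32_t) + align - 1) & ~(align - 1);

	return aligned;
}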
struct device *dev = pci->dev;
INIT_LIST_HEAD(&ep->func_list);
+ ep->msi_iatu_mapped = false;
+ ep->msi_msg_addr = 0;
+ ep->msi_map_size = 0;
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {