Towards the end goal of making all CXL RAS capability handling uniform
across host bridge ports, upstream switch ports, and endpoint ports, move
dport RAS setup into cxl_switch_port_probe() context for switch / VH
dports (via cxl_port_add_dport()) and into cxl_endpoint_port_probe()
context for RCH dports. Rename the RAS setup helper to
devm_cxl_dport_ras_setup() for symmetry with
devm_cxl_switch_port_decoders_setup().
Only the RCH version needs to be exported, and the cxl_test mocking can
be dropped in favor of a dev_is_pci() check on the dport_dev.
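In sketch form, the resulting call paths (simplified; details in the
hunks below) are:

    cxl_port_add_dport()                  /* switch / VH dports */
        devm_cxl_dport_ras_setup()

    cxl_endpoint_port_probe()             /* RCH dports */
        devm_cxl_dport_rch_ras_setup()
            devm_cxl_dport_ras_setup()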
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Tested-by: Terry Bowman <terry.bowman@amd.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Link: https://patch.msgid.link/20260131000403.2135324-7-dan.j.williams@intel.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
struct access_coordinate *c);
+static inline struct device *dport_to_host(struct cxl_dport *dport)
+{
+ struct cxl_port *port = dport->port;
+
+ if (is_cxl_root(port))
+ return port->uport_dev;
+ return &port->dev;
+}
#ifdef CONFIG_CXL_RAS
int cxl_ras_init(void);
void cxl_ras_exit(void);
void cxl_dport_map_rch_aer(struct cxl_dport *dport);
void cxl_disable_rch_root_ints(struct cxl_dport *dport);
void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds);
+void devm_cxl_dport_ras_setup(struct cxl_dport *dport);
#else
static inline int cxl_ras_init(void)
{
static inline void cxl_dport_map_rch_aer(struct cxl_dport *dport) { }
static inline void cxl_disable_rch_root_ints(struct cxl_dport *dport) { }
static inline void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { }
+static inline void devm_cxl_dport_ras_setup(struct cxl_dport *dport) { }
#endif /* CONFIG_CXL_RAS */
int cxl_gpf_port_setup(struct cxl_dport *dport);
sysfs_remove_link(&port->dev.kobj, link_name);
}
-static struct device *dport_to_host(struct cxl_dport *dport)
-{
- struct cxl_port *port = dport->port;
-
- if (is_cxl_root(port))
- return port->uport_dev;
- return &port->dev;
-}
-
static void free_dport(void *dport)
{
kfree(dport);
cxl_debugfs_create_dport_dir(dport);
+ if (!dport->rch)
+ devm_cxl_dport_ras_setup(dport);
+
/* keep the group, and mark the end of devm actions */
cxl_dport_close_dr_group(dport, no_free_ptr(dport_dr_group));
}
/**
- * cxl_dport_init_ras_reporting - Setup CXL RAS report on this dport
+ * devm_cxl_dport_ras_setup - Setup CXL RAS report on this dport
* @dport: the cxl_dport that needs to be initialized
- * @host: host device for devm operations
*/
-void cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host)
+void devm_cxl_dport_ras_setup(struct cxl_dport *dport)
{
- dport->reg_map.host = host;
+ dport->reg_map.host = dport_to_host(dport);
cxl_dport_map_ras(dport);
+}
- if (dport->rch) {
- struct pci_host_bridge *host_bridge = to_pci_host_bridge(dport->dport_dev);
+void devm_cxl_dport_rch_ras_setup(struct cxl_dport *dport)
+{
+ struct pci_host_bridge *host_bridge;
- if (!host_bridge->native_aer)
- return;
+ if (!dev_is_pci(dport->dport_dev))
+ return;
- cxl_dport_map_rch_aer(dport);
- cxl_disable_rch_root_ints(dport);
- }
+ devm_cxl_dport_ras_setup(dport);
+
+ host_bridge = to_pci_host_bridge(dport->dport_dev);
+ if (!host_bridge->native_aer)
+ return;
+
+ cxl_dport_map_rch_aer(dport);
+ cxl_disable_rch_root_ints(dport);
}
-EXPORT_SYMBOL_NS_GPL(cxl_dport_init_ras_reporting, "CXL");
+EXPORT_SYMBOL_NS_GPL(devm_cxl_dport_rch_ras_setup, "CXL");
void cxl_handle_cor_ras(struct device *dev, void __iomem *ras_base)
{
void cxl_cor_error_detected(struct pci_dev *pdev);
pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
pci_channel_state_t state);
-void cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host);
+void devm_cxl_dport_rch_ras_setup(struct cxl_dport *dport);
#else
static inline void cxl_cor_error_detected(struct pci_dev *pdev) { }
return PCI_ERS_RESULT_NONE;
}
-static inline void cxl_dport_init_ras_reporting(struct cxl_dport *dport,
- struct device *host) { }
+static inline void devm_cxl_dport_rch_ras_setup(struct cxl_dport *dport)
+{
+}
#endif
#endif /* __CXL_PCI_H__ */
else
endpoint_parent = &parent_port->dev;
- cxl_dport_init_ras_reporting(dport, dev);
-
scoped_guard(device, endpoint_parent) {
if (!endpoint_parent->driver) {
dev_err(dev, "CXL port topology %s not enabled\n",
static int cxl_endpoint_port_probe(struct cxl_port *port)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+ struct cxl_dport *dport = port->parent_dport;
int rc;
/* Cache the data early to ensure is_visible() works */
if (rc)
return rc;
+ /*
+ * With VH (CXL Virtual Host) topology the cxl_port::add_dport() method
+ * handles RAS setup for downstream ports. With RCH (Restricted CXL
+ * Host) topologies the downstream port is enumerated early by platform
+ * firmware, but the RCRB (root complex register block) is not mapped
+ * until after the cxl_pci driver attaches to the RCIeP (root complex
+ * integrated endpoint).
+ */
+ if (dport->rch)
+ devm_cxl_dport_rch_ras_setup(dport);
+
/*
* Now that all endpoint decoders are successfully enumerated, try to
* assemble regions from committed decoders
ldflags-y += --wrap=cxl_await_media_ready
ldflags-y += --wrap=devm_cxl_add_rch_dport
ldflags-y += --wrap=cxl_endpoint_parse_cdat
-ldflags-y += --wrap=cxl_dport_init_ras_reporting
ldflags-y += --wrap=devm_cxl_endpoint_decoders_setup
ldflags-y += --wrap=hmat_get_extended_linear_cache_size
ldflags-y += --wrap=devm_cxl_add_dport_by_dev
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_endpoint_parse_cdat, "CXL");
-void __wrap_cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host)
-{
- int index;
- struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
-
- if (!ops || !ops->is_mock_port(dport->dport_dev))
- cxl_dport_init_ras_reporting(dport, host);
-
- put_cxl_mock_ops(index);
-}
-EXPORT_SYMBOL_NS_GPL(__wrap_cxl_dport_init_ras_reporting, "CXL");
-
struct cxl_dport *__wrap_devm_cxl_add_dport_by_dev(struct cxl_port *port,
struct device *dport_dev)
{