}
/**
- * __devm_cxl_switch_port_decoders_setup - allocate and setup switch decoders
+ * devm_cxl_switch_port_decoders_setup - allocate and setup switch decoders
* @port: CXL port context
*
* Return 0 or -errno on error
*/
-int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
+int devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
{
struct cxl_hdm *cxlhdm;
dev_err(&port->dev, "HDM decoder capability not found\n");
return -ENXIO;
}
-EXPORT_SYMBOL_NS_GPL(__devm_cxl_switch_port_decoders_setup, "CXL");
+EXPORT_SYMBOL_NS_GPL(devm_cxl_switch_port_decoders_setup, "CXL");
/**
* devm_cxl_endpoint_decoders_setup - allocate and setup endpoint decoders
}
/**
- * __devm_cxl_add_dport_by_dev - allocate a dport by dport device
+ * devm_cxl_add_dport_by_dev - allocate a dport by dport device
* @port: cxl_port that hosts the dport
* @dport_dev: 'struct device' of the dport
*
* Returns the allocated dport on success or ERR_PTR() of -errno on error
*/
-struct cxl_dport *__devm_cxl_add_dport_by_dev(struct cxl_port *port,
- struct device *dport_dev)
+struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev)
{
struct cxl_register_map map;
struct pci_dev *pdev;
device_lock_assert(&port->dev);
return devm_cxl_add_dport(port, dport_dev, port_num, map.resource);
}
-EXPORT_SYMBOL_NS_GPL(__devm_cxl_add_dport_by_dev, "CXL");
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport_by_dev, "CXL");
static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id)
{
return cxl_setup_regs(map);
}
-static int cxl_port_setup_regs(struct cxl_port *port,
+int cxl_port_setup_regs(struct cxl_port *port,
resource_size_t component_reg_phys)
{
if (dev_is_platform(port->uport_dev))
return cxl_setup_comp_regs(&port->dev, &port->reg_map,
component_reg_phys);
}
+EXPORT_SYMBOL_NS_GPL(cxl_port_setup_regs, "CXL");
static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
resource_size_t component_reg_phys)
return 0;
}
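+/*
+ * Walk the decoders under @port and refresh their target lists after
+ * @dport has been added.
+ */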
+void cxl_port_update_decoder_targets(struct cxl_port *port,
+ struct cxl_dport *dport)
+{
+ device_for_each_child(&port->dev, dport, update_decoder_targets);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_port_update_decoder_targets, "CXL");
+
static bool dport_exists(struct cxl_port *port, struct device *dport_dev)
{
struct cxl_dport *dport = cxl_find_dport_by_dev(port, dport_dev);
return false;
}
-/* note this implicitly casts the group back to its @port */
-DEFINE_FREE(cxl_port_release_dr_group, struct cxl_port *,
- if (_T) devres_release_group(&_T->dev, _T))
-
-static struct cxl_dport *cxl_port_add_dport(struct cxl_port *port,
- struct device *dport_dev)
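+/* Route dport discovery through the bound port driver's ->add_dport() op */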
+static struct cxl_dport *probe_dport(struct cxl_port *port,
+ struct device *dport_dev)
{
- struct cxl_dport *dport;
- int rc;
+ struct cxl_driver *drv;
device_lock_assert(&port->dev);
if (!port->dev.driver)
if (dport_exists(port, dport_dev))
return ERR_PTR(-EBUSY);
- /* Temp group for all "first dport" and "per dport" setup actions */
- void *port_dr_group __free(cxl_port_release_dr_group) =
- devres_open_group(&port->dev, port, GFP_KERNEL);
- if (!port_dr_group)
- return ERR_PTR(-ENOMEM);
-
- if (port->nr_dports == 0) {
- /*
- * Some host bridges are known to not have component regsisters
- * available until a root port has trained CXL. Perform that
- * setup now.
- */
- rc = cxl_port_setup_regs(port, port->component_reg_phys);
- if (rc)
- return ERR_PTR(rc);
-
- rc = devm_cxl_switch_port_decoders_setup(port);
- if (rc)
- return ERR_PTR(rc);
- }
-
- dport = devm_cxl_add_dport_by_dev(port, dport_dev);
- if (IS_ERR(dport))
- return dport;
-
- /* This group was only needed for early exit above */
- devres_remove_group(&port->dev, no_free_ptr(port_dr_group));
-
- cxl_switch_parse_cdat(dport);
-
- /* New dport added, update the decoder targets */
- device_for_each_child(&port->dev, dport, update_decoder_targets);
-
- dev_dbg(&port->dev, "dport%d:%s added\n", dport->port_id,
- dev_name(dport_dev));
+ drv = container_of(port->dev.driver, struct cxl_driver, drv);
+ if (!drv->add_dport)
+ return ERR_PTR(-ENXIO);
- return dport;
+ /* see cxl_port_add_dport() */
+ return drv->add_dport(port, dport_dev);
}
static struct cxl_dport *devm_cxl_create_port(struct device *ep_dev,
}
guard(device)(&port->dev);
- return cxl_port_add_dport(port, dport_dev);
+ return probe_dport(port, dport_dev);
}
static int add_port_attach_ep(struct cxl_memdev *cxlmd,
scoped_guard(device, &parent_port->dev) {
parent_dport = cxl_find_dport_by_dev(parent_port, dparent);
if (!parent_dport) {
- parent_dport = cxl_port_add_dport(parent_port, dparent);
+ parent_dport = probe_dport(parent_port, dparent);
if (IS_ERR(parent_dport))
return PTR_ERR(parent_dport);
}
device_lock_assert(&port->dev);
dport = cxl_find_dport_by_dev(port, dport_dev);
if (!dport) {
- dport = cxl_port_add_dport(port, dport_dev);
+ dport = probe_dport(port, dport_dev);
if (IS_ERR(dport))
return dport;
};
int devm_cxl_switch_port_decoders_setup(struct cxl_port *port);
-int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port);
int devm_cxl_endpoint_decoders_setup(struct cxl_port *port);
+void cxl_port_update_decoder_targets(struct cxl_port *port,
+ struct cxl_dport *dport);
+int cxl_port_setup_regs(struct cxl_port *port,
+ resource_size_t component_reg_phys);
struct cxl_dev_state;
int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
extern const struct bus_type cxl_bus_type;
+/*
+ * Note, add_dport() is expressly for the cxl_port driver. TODO: investigate a
+ * type-safe driver model where probe()/remove() take the type of object implied
+ * by @id and where the add_dport() op is defined only for the
+ * CXL_DEVICE_PORT driver template.
+ */
struct cxl_driver {
const char *name;
int (*probe)(struct device *dev);
void (*remove)(struct device *dev);
+ struct cxl_dport *(*add_dport)(struct cxl_port *port,
+ struct device *dport_dev);
struct device_driver drv;
int id;
};
bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port,
struct device *dport_dev);
-struct cxl_dport *__devm_cxl_add_dport_by_dev(struct cxl_port *port,
- struct device *dport_dev);
/*
* Unit test builds overrides this to __weak, find the 'strong' version
u16 cxl_gpf_get_dvsec(struct device *dev);
-/*
- * Declaration for functions that are mocked by cxl_test that are called by
- * cxl_core. The respective functions are defined as __foo() and called by
- * cxl_core as foo(). The macros below ensures that those functions would
- * exist as foo(). See tools/testing/cxl/cxl_core_exports.c and
- * tools/testing/cxl/exports.h for setting up the mock functions. The dance
- * is done to avoid a circular dependency where cxl_core calls a function that
- * ends up being a mock function and goes to * cxl_test where it calls a
- * cxl_core function.
- */
-#ifndef CXL_TEST_ENABLE
-#define DECLARE_TESTABLE(x) __##x
-#define devm_cxl_add_dport_by_dev DECLARE_TESTABLE(devm_cxl_add_dport_by_dev)
-#define devm_cxl_switch_port_decoders_setup DECLARE_TESTABLE(devm_cxl_switch_port_decoders_setup)
-#endif
-
#endif /* __CXL_H__ */
NULL,
};
+/* note this implicitly casts the group back to its @port */
+DEFINE_FREE(cxl_port_release_dr_group, struct cxl_port *,
+ if (_T) devres_release_group(&_T->dev, _T))
+
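+/*
+ * On first dport arrival, set up component registers and switch decoders;
+ * the temporary devres group unwinds that setup if adding the dport fails.
+ */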
+static struct cxl_dport *cxl_port_add_dport(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct cxl_dport *dport;
+ int rc;
+
+ /* Temp group for all "first dport" and "per dport" setup actions */
+ void *port_dr_group __free(cxl_port_release_dr_group) =
+ devres_open_group(&port->dev, port, GFP_KERNEL);
+ if (!port_dr_group)
+ return ERR_PTR(-ENOMEM);
+
+ if (port->nr_dports == 0) {
+ /*
+ * Some host bridges are known to not have component registers
+ * available until a root port has trained CXL. Perform that
+ * setup now.
+ */
+ rc = cxl_port_setup_regs(port, port->component_reg_phys);
+ if (rc)
+ return ERR_PTR(rc);
+
+ rc = devm_cxl_switch_port_decoders_setup(port);
+ if (rc)
+ return ERR_PTR(rc);
+ }
+
+ dport = devm_cxl_add_dport_by_dev(port, dport_dev);
+ if (IS_ERR(dport))
+ return dport;
+
+ /* This group was only needed for early exit above */
+ devres_remove_group(&port->dev, no_free_ptr(port_dr_group));
+
+ cxl_switch_parse_cdat(dport);
+
+ /* New dport added, update the decoder targets */
+ cxl_port_update_decoder_targets(port, dport);
+
+ dev_dbg(&port->dev, "dport%d:%s added\n", dport->port_id,
+ dev_name(dport_dev));
+
+ return dport;
+}
+
static struct cxl_driver cxl_port_driver = {
.name = "cxl_port",
.probe = cxl_port_probe,
+ .add_dport = cxl_port_add_dport,
.id = CXL_DEVICE_PORT,
.drv = {
.dev_groups = cxl_port_attribute_groups,
ldflags-y += --wrap=cxl_dport_init_ras_reporting
ldflags-y += --wrap=devm_cxl_endpoint_decoders_setup
ldflags-y += --wrap=hmat_get_extended_linear_cache_size
+ldflags-y += --wrap=devm_cxl_add_dport_by_dev
+ldflags-y += --wrap=devm_cxl_switch_port_decoders_setup
DRIVERS := ../../../drivers
CXL_SRC := $(DRIVERS)/cxl
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include "cxl.h"
-#include "exports.h"
/* Exporting of cxl_core symbols that are only used by cxl_test */
EXPORT_SYMBOL_NS_GPL(cxl_num_decoders_committed, "CXL");
-
-cxl_add_dport_by_dev_fn _devm_cxl_add_dport_by_dev =
- __devm_cxl_add_dport_by_dev;
-EXPORT_SYMBOL_NS_GPL(_devm_cxl_add_dport_by_dev, "CXL");
-
-struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port,
- struct device *dport_dev)
-{
- return _devm_cxl_add_dport_by_dev(port, dport_dev);
-}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport_by_dev, "CXL");
-
-cxl_switch_decoders_setup_fn _devm_cxl_switch_port_decoders_setup =
- __devm_cxl_switch_port_decoders_setup;
-EXPORT_SYMBOL_NS_GPL(_devm_cxl_switch_port_decoders_setup, "CXL");
-
-int devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
-{
- return _devm_cxl_switch_port_decoders_setup(port);
-}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_switch_port_decoders_setup, "CXL");
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2025 Intel Corporation */
-#ifndef __MOCK_CXL_EXPORTS_H_
-#define __MOCK_CXL_EXPORTS_H_
-
-typedef struct cxl_dport *(*cxl_add_dport_by_dev_fn)(struct cxl_port *port,
- struct device *dport_dev);
-extern cxl_add_dport_by_dev_fn _devm_cxl_add_dport_by_dev;
-
-typedef int(*cxl_switch_decoders_setup_fn)(struct cxl_port *port);
-extern cxl_switch_decoders_setup_fn _devm_cxl_switch_port_decoders_setup;
-
-#endif
#include <cxlmem.h>
#include <cxlpci.h>
#include "mock.h"
-#include "../exports.h"
static LIST_HEAD(mock);
-static struct cxl_dport *
-redirect_devm_cxl_add_dport_by_dev(struct cxl_port *port,
- struct device *dport_dev);
-static int redirect_devm_cxl_switch_port_decoders_setup(struct cxl_port *port);
-
void register_cxl_mock_ops(struct cxl_mock_ops *ops)
{
list_add_rcu(&ops->list, &mock);
- _devm_cxl_add_dport_by_dev = redirect_devm_cxl_add_dport_by_dev;
- _devm_cxl_switch_port_decoders_setup =
- redirect_devm_cxl_switch_port_decoders_setup;
}
EXPORT_SYMBOL_GPL(register_cxl_mock_ops);
void unregister_cxl_mock_ops(struct cxl_mock_ops *ops)
{
- _devm_cxl_switch_port_decoders_setup =
- __devm_cxl_switch_port_decoders_setup;
- _devm_cxl_add_dport_by_dev = __devm_cxl_add_dport_by_dev;
list_del_rcu(&ops->list);
synchronize_srcu(&cxl_mock_srcu);
}
}
EXPORT_SYMBOL_GPL(__wrap_nvdimm_bus_register);
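+/*
+ * Calls from cxl_core are redirected here by the --wrap= ldflags; mock
+ * ports are serviced by the cxl_test ops, everything else falls through
+ * to the real implementation.
+ */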
-int redirect_devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
+int __wrap_devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
{
int rc, index;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
if (ops && ops->is_mock_port(port->uport_dev))
rc = ops->devm_cxl_switch_port_decoders_setup(port);
else
- rc = __devm_cxl_switch_port_decoders_setup(port);
+ rc = devm_cxl_switch_port_decoders_setup(port);
put_cxl_mock_ops(index);
return rc;
}
+EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_switch_port_decoders_setup, "CXL");
int __wrap_devm_cxl_endpoint_decoders_setup(struct cxl_port *port)
{
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_dport_init_ras_reporting, "CXL");
-struct cxl_dport *redirect_devm_cxl_add_dport_by_dev(struct cxl_port *port,
- struct device *dport_dev)
+struct cxl_dport *__wrap_devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev)
{
int index;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
if (ops && ops->is_mock_port(port->uport_dev))
dport = ops->devm_cxl_add_dport_by_dev(port, dport_dev);
else
- dport = __devm_cxl_add_dport_by_dev(port, dport_dev);
+ dport = devm_cxl_add_dport_by_dev(port, dport_dev);
put_cxl_mock_ops(index);
return dport;
}
+EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_add_dport_by_dev, "CXL");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("cxl_test: emulation module");