Unlike the cxl_pci class driver that opportunistically enables memory
expansion with no other dependent functionality, CXL accelerator drivers
have distinct PCIe-only and CXL-enhanced operation states. If CXL is
available some additional coherent memory/cache operations can be enabled,
otherwise traditional DMA+MMIO over PCIe/CXL.io is a fallback.
This constitutes a new mode of operation where the caller of
devm_cxl_add_memdev() wants to make a "go/no-go" decision about running
in CXL accelerated mode or falling back to PCIe-only operation. Part of
that decision making process likely also includes additional
CXL-acceleration-specific resource setup. Encapsulate both of those
requirements into 'struct cxl_memdev_attach' that provides a ->probe()
callback. The probe callback runs in cxl_mem_probe() context, after the
port topology is successfully attached for the given memdev. It supports
a contract where, upon successful return from devm_cxl_add_memdev(),
everything needed for CXL accelerated operation has been enabled.
Additionally, the presence of @cxlmd->attach indicates that the accelerator
driver should be detached when CXL operation ends. This conceptually makes a CXL
link loss event mirror a PCIe link loss event which results in triggering
the ->remove() callback of affected devices+drivers. A driver can re-attach
to recover back to PCIe-only operation. Live recovery, i.e. without a
->remove()/->probe() cycle, is left as a future consideration.
[ dj: Replace with updated commit log from Dan ]
Cc: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
Reviewed-by: Ben Cheatham <benjamin.cheatham@amd.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Tested-by: Alejandro Lucero <alucerop@amd.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Link: https://patch.msgid.link/20251216005616.3090129-7-dan.j.williams@intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
struct cxl_memdev *cxlmd;
cxlmd = container_of(work, typeof(*cxlmd), detach_work);
- device_release_driver(&cxlmd->dev);
+
+ /*
+ * When the creator of @cxlmd sets ->attach it indicates CXL operation
+ * is required. In that case, @cxlmd detach escalates to parent device
+ * detach.
+ */
+ if (cxlmd->attach)
+ device_release_driver(cxlmd->dev.parent);
+ else
+ device_release_driver(&cxlmd->dev);
put_device(&cxlmd->dev);
}
static struct lock_class_key cxl_memdev_key;
static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
- const struct file_operations *fops)
+ const struct file_operations *fops,
+ const struct cxl_memdev_attach *attach)
{
struct cxl_memdev *cxlmd;
struct device *dev;
goto err;
cxlmd->id = rc;
cxlmd->depth = -1;
+ cxlmd->attach = attach;
+ cxlmd->endpoint = ERR_PTR(-ENXIO);
dev = &cxlmd->dev;
device_initialize(dev);
{
int rc;
+ /*
+ * If @attach is provided fail if the driver is not attached upon
+ * return. Note that failure here could be the result of a race to
+ * teardown the CXL port topology. I.e. cxl_mem_probe() could have
+ * succeeded and then cxl_mem unbound before the lock is acquired.
+ */
+ guard(device)(&cxlmd->dev);
+ if (cxlmd->attach && !cxlmd->dev.driver) {
+ cxl_memdev_unregister(cxlmd);
+ return ERR_PTR(-ENXIO);
+ }
+
rc = devm_add_action_or_reset(cxlmd->cxlds->dev, cxl_memdev_unregister,
cxlmd);
if (rc)
* Core helper for devm_cxl_add_memdev() that wants to both create a device and
* assert to the caller that upon return cxl_mem::probe() has been invoked.
*/
-struct cxl_memdev *__devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
+struct cxl_memdev *__devm_cxl_add_memdev(struct cxl_dev_state *cxlds,
+ const struct cxl_memdev_attach *attach)
{
struct device *dev;
int rc;
struct cxl_memdev *cxlmd __free(put_cxlmd) =
- cxl_memdev_alloc(cxlds, &cxl_memdev_fops);
+ cxl_memdev_alloc(cxlds, &cxl_memdev_fops, attach);
if (IS_ERR(cxlmd))
return cxlmd;
(FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \
CXLMDEV_RESET_NEEDED_NOT)
+struct cxl_memdev_attach {
+ int (*probe)(struct cxl_memdev *cxlmd);
+};
+
/**
* struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
* @dev: driver core device object
* @cxl_nvb: coordinate removal of @cxl_nvd if present
* @cxl_nvd: optional bridge to an nvdimm if the device supports pmem
* @endpoint: connection to the CXL port topology for this memory device
+ * @attach: creator of this memdev depends on CXL link attach to operate
* @id: id number of this memdev instance.
* @depth: endpoint port depth
* @scrub_cycle: current scrub cycle set for this device
struct cxl_nvdimm_bridge *cxl_nvb;
struct cxl_nvdimm *cxl_nvd;
struct cxl_port *endpoint;
+ const struct cxl_memdev_attach *attach;
int id;
int depth;
u8 scrub_cycle;
return is_cxl_memdev(port->uport_dev);
}
-struct cxl_memdev *__devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
-struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
+struct cxl_memdev *__devm_cxl_add_memdev(struct cxl_dev_state *cxlds,
+ const struct cxl_memdev_attach *attach);
+struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds,
+ const struct cxl_memdev_attach *attach);
int devm_cxl_sanitize_setup_notifier(struct device *host,
struct cxl_memdev *cxlmd);
struct cxl_memdev_state;
return rc;
}
+ if (cxlmd->attach) {
+ rc = cxlmd->attach->probe(cxlmd);
+ if (rc)
+ return rc;
+ }
+
rc = devm_cxl_memdev_edac_register(cxlmd);
if (rc)
dev_dbg(dev, "CXL memdev EDAC registration failed rc=%d\n", rc);
/**
* devm_cxl_add_memdev - Add a CXL memory device
* @cxlds: CXL device state to associate with the memdev
+ * @attach: Caller depends on CXL topology attachment
*
* Upon return the device will have had a chance to attach to the
- * cxl_mem driver, but may fail if the CXL topology is not ready
- * (hardware CXL link down, or software platform CXL root not attached)
+ * cxl_mem driver, but may fail to attach if the CXL topology is not ready
+ * (hardware CXL link down, or software platform CXL root not attached).
+ *
+ * When @attach is NULL it indicates the caller wants the memdev to remain
+ * registered even if it does not immediately attach to the CXL hierarchy. When
+ * @attach is provided a cxl_mem_probe() failure leads to failure of this routine.
*
* The parent of the resulting device and the devm context for allocations is
* @cxlds->dev.
*/
-struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
+struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds,
+ const struct cxl_memdev_attach *attach)
{
- return __devm_cxl_add_memdev(cxlds);
+ return __devm_cxl_add_memdev(cxlds, attach);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, "CXL");
if (rc)
dev_dbg(&pdev->dev, "No CXL Features discovered\n");
- cxlmd = devm_cxl_add_memdev(cxlds);
+ cxlmd = devm_cxl_add_memdev(cxlds, NULL);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
cxl_mock_add_event_logs(&mdata->mes);
- cxlmd = devm_cxl_add_memdev(cxlds);
+ cxlmd = devm_cxl_add_memdev(cxlds, NULL);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);