git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
cxl/core/region: move pmem region driver logic into region_pmem.c
authorGregory Price <gourry@gourry.net>
Fri, 27 Mar 2026 02:02:01 +0000 (22:02 -0400)
committerDave Jiang <dave.jiang@intel.com>
Fri, 27 Mar 2026 18:45:21 +0000 (11:45 -0700)
core/region.c is overloaded with per-region control logic (pmem, dax,
sysram, etc). Move the pmem region driver logic from region.c into
region_pmem.c to make it clear that this code only applies to pmem regions.

No functional changes.

[ dj: Fixed up some tabbing issues, may be from original code. ]

Signed-off-by: Gregory Price <gourry@gourry.net>
Co-developed-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Alison Schofield <alison.schofield@intel.com>
Link: https://patch.msgid.link/20260327020203.876122-2-gourry@gourry.net
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
drivers/cxl/core/Makefile
drivers/cxl/core/core.h
drivers/cxl/core/region.c
drivers/cxl/core/region_pmem.c [new file with mode: 0644]
tools/testing/cxl/Kbuild

index a639a949997202fb795d0d9c6da0ef0b06ec96ee..f73776fe323be2e1dc87aab4eca2c0c8d71cdd42 100644 (file)
@@ -15,7 +15,7 @@ cxl_core-y += hdm.o
 cxl_core-y += pmu.o
 cxl_core-y += cdat.o
 cxl_core-$(CONFIG_TRACING) += trace.o
-cxl_core-$(CONFIG_CXL_REGION) += region.o
+cxl_core-$(CONFIG_CXL_REGION) += region.o region_pmem.o
 cxl_core-$(CONFIG_CXL_MCE) += mce.o
 cxl_core-$(CONFIG_CXL_FEATURES) += features.o
 cxl_core-$(CONFIG_CXL_EDAC_MEM_FEATURES) += edac.o
index 5b0570df0fd9c9629d68956aa7a3dabe45639a37..2fa5f2f58c9bd0c272b6591018da56d613fd93f8 100644 (file)
@@ -50,6 +50,7 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port);
 struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa);
 u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
                   u64 dpa);
+int devm_cxl_add_pmem_region(struct cxl_region *cxlr);
 
 #else
 static inline u64 cxl_dpa_to_hpa(struct cxl_region *cxlr,
index 42874948b589b95a498230a93f5951f30317c0eb..cf1b7e0617f3254afcc618f8b7f9e33afb23111c 100644 (file)
@@ -2757,46 +2757,6 @@ static ssize_t delete_region_store(struct device *dev,
 }
 DEVICE_ATTR_WO(delete_region);
 
-static void cxl_pmem_region_release(struct device *dev)
-{
-       struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
-       int i;
-
-       for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
-               struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;
-
-               put_device(&cxlmd->dev);
-       }
-
-       kfree(cxlr_pmem);
-}
-
-static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
-       &cxl_base_attribute_group,
-       NULL,
-};
-
-const struct device_type cxl_pmem_region_type = {
-       .name = "cxl_pmem_region",
-       .release = cxl_pmem_region_release,
-       .groups = cxl_pmem_region_attribute_groups,
-};
-
-bool is_cxl_pmem_region(struct device *dev)
-{
-       return dev->type == &cxl_pmem_region_type;
-}
-EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, "CXL");
-
-struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
-{
-       if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
-                         "not a cxl_pmem_region device\n"))
-               return NULL;
-       return container_of(dev, struct cxl_pmem_region, dev);
-}
-EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, "CXL");
-
 struct cxl_poison_context {
        struct cxl_port *port;
        int part;
@@ -3450,64 +3410,6 @@ static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
        return -ENXIO;
 }
 
-static struct lock_class_key cxl_pmem_region_key;
-
-static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
-{
-       struct cxl_region_params *p = &cxlr->params;
-       struct cxl_nvdimm_bridge *cxl_nvb;
-       struct device *dev;
-       int i;
-
-       guard(rwsem_read)(&cxl_rwsem.region);
-       if (p->state != CXL_CONFIG_COMMIT)
-               return -ENXIO;
-
-       struct cxl_pmem_region *cxlr_pmem __free(kfree) =
-               kzalloc_flex(*cxlr_pmem, mapping, p->nr_targets);
-       if (!cxlr_pmem)
-               return -ENOMEM;
-
-       cxlr_pmem->hpa_range.start = p->res->start;
-       cxlr_pmem->hpa_range.end = p->res->end;
-
-       /* Snapshot the region configuration underneath the cxl_rwsem.region */
-       cxlr_pmem->nr_mappings = p->nr_targets;
-       for (i = 0; i < p->nr_targets; i++) {
-               struct cxl_endpoint_decoder *cxled = p->targets[i];
-               struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
-               struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
-
-               /*
-                * Regions never span CXL root devices, so by definition the
-                * bridge for one device is the same for all.
-                */
-               if (i == 0) {
-                       cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint);
-                       if (!cxl_nvb)
-                               return -ENODEV;
-                       cxlr->cxl_nvb = cxl_nvb;
-               }
-               m->cxlmd = cxlmd;
-               get_device(&cxlmd->dev);
-               m->start = cxled->dpa_res->start;
-               m->size = resource_size(cxled->dpa_res);
-               m->position = i;
-       }
-
-       dev = &cxlr_pmem->dev;
-       device_initialize(dev);
-       lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
-       device_set_pm_not_required(dev);
-       dev->parent = &cxlr->dev;
-       dev->bus = &cxl_bus_type;
-       dev->type = &cxl_pmem_region_type;
-       cxlr_pmem->cxlr = cxlr;
-       cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem);
-
-       return 0;
-}
-
 static void cxl_dax_region_release(struct device *dev)
 {
        struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev);
@@ -3571,92 +3473,6 @@ static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
        return cxlr_dax;
 }
 
-static void cxlr_pmem_unregister(void *_cxlr_pmem)
-{
-       struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem;
-       struct cxl_region *cxlr = cxlr_pmem->cxlr;
-       struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
-
-       /*
-        * Either the bridge is in ->remove() context under the device_lock(),
-        * or cxlr_release_nvdimm() is cancelling the bridge's release action
-        * for @cxlr_pmem and doing it itself (while manually holding the bridge
-        * lock).
-        */
-       device_lock_assert(&cxl_nvb->dev);
-       cxlr->cxlr_pmem = NULL;
-       cxlr_pmem->cxlr = NULL;
-       device_unregister(&cxlr_pmem->dev);
-}
-
-static void cxlr_release_nvdimm(void *_cxlr)
-{
-       struct cxl_region *cxlr = _cxlr;
-       struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
-
-       scoped_guard(device, &cxl_nvb->dev) {
-               if (cxlr->cxlr_pmem)
-                       devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
-                                           cxlr->cxlr_pmem);
-       }
-       cxlr->cxl_nvb = NULL;
-       put_device(&cxl_nvb->dev);
-}
-
-/**
- * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
- * @cxlr: parent CXL region for this pmem region bridge device
- *
- * Return: 0 on success negative error code on failure.
- */
-static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
-{
-       struct cxl_pmem_region *cxlr_pmem;
-       struct cxl_nvdimm_bridge *cxl_nvb;
-       struct device *dev;
-       int rc;
-
-       rc = cxl_pmem_region_alloc(cxlr);
-       if (rc)
-               return rc;
-       cxlr_pmem = cxlr->cxlr_pmem;
-       cxl_nvb = cxlr->cxl_nvb;
-
-       dev = &cxlr_pmem->dev;
-       rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
-       if (rc)
-               goto err;
-
-       rc = device_add(dev);
-       if (rc)
-               goto err;
-
-       dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
-               dev_name(dev));
-
-       scoped_guard(device, &cxl_nvb->dev) {
-               if (cxl_nvb->dev.driver)
-                       rc = devm_add_action_or_reset(&cxl_nvb->dev,
-                                                     cxlr_pmem_unregister,
-                                                     cxlr_pmem);
-               else
-                       rc = -ENXIO;
-       }
-
-       if (rc)
-               goto err_bridge;
-
-       /* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
-       return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr);
-
-err:
-       put_device(dev);
-err_bridge:
-       put_device(&cxl_nvb->dev);
-       cxlr->cxl_nvb = NULL;
-       return rc;
-}
-
 static void cxlr_dax_unregister(void *_cxlr_dax)
 {
        struct cxl_dax_region *cxlr_dax = _cxlr_dax;
diff --git a/drivers/cxl/core/region_pmem.c b/drivers/cxl/core/region_pmem.c
new file mode 100644 (file)
index 0000000..23d97e3
--- /dev/null
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <cxlmem.h>
+#include <cxl.h>
+#include "core.h"
+
+static void cxl_pmem_region_release(struct device *dev)
+{
+       struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
+       int i;
+
+       for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
+               struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;
+
+               put_device(&cxlmd->dev);
+       }
+
+       kfree(cxlr_pmem);
+}
+
+static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
+       &cxl_base_attribute_group,
+       NULL
+};
+
+const struct device_type cxl_pmem_region_type = {
+       .name = "cxl_pmem_region",
+       .release = cxl_pmem_region_release,
+       .groups = cxl_pmem_region_attribute_groups,
+};
+
+bool is_cxl_pmem_region(struct device *dev)
+{
+       return dev->type == &cxl_pmem_region_type;
+}
+EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, "CXL");
+
+struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
+{
+       if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
+                         "not a cxl_pmem_region device\n"))
+               return NULL;
+       return container_of(dev, struct cxl_pmem_region, dev);
+}
+EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, "CXL");
+
+static struct lock_class_key cxl_pmem_region_key;
+
+static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
+{
+       struct cxl_region_params *p = &cxlr->params;
+       struct cxl_nvdimm_bridge *cxl_nvb;
+       struct device *dev;
+       int i;
+
+       guard(rwsem_read)(&cxl_rwsem.region);
+       if (p->state != CXL_CONFIG_COMMIT)
+               return -ENXIO;
+
+       struct cxl_pmem_region *cxlr_pmem __free(kfree) =
+               kzalloc_flex(*cxlr_pmem, mapping, p->nr_targets);
+       if (!cxlr_pmem)
+               return -ENOMEM;
+
+       cxlr_pmem->hpa_range.start = p->res->start;
+       cxlr_pmem->hpa_range.end = p->res->end;
+
+       /* Snapshot the region configuration underneath the cxl_rwsem.region */
+       cxlr_pmem->nr_mappings = p->nr_targets;
+       for (i = 0; i < p->nr_targets; i++) {
+               struct cxl_endpoint_decoder *cxled = p->targets[i];
+               struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+               struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
+
+               /*
+                * Regions never span CXL root devices, so by definition the
+                * bridge for one device is the same for all.
+                */
+               if (i == 0) {
+                       cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint);
+                       if (!cxl_nvb)
+                               return -ENODEV;
+                       cxlr->cxl_nvb = cxl_nvb;
+               }
+               m->cxlmd = cxlmd;
+               get_device(&cxlmd->dev);
+               m->start = cxled->dpa_res->start;
+               m->size = resource_size(cxled->dpa_res);
+               m->position = i;
+       }
+
+       dev = &cxlr_pmem->dev;
+       device_initialize(dev);
+       lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
+       device_set_pm_not_required(dev);
+       dev->parent = &cxlr->dev;
+       dev->bus = &cxl_bus_type;
+       dev->type = &cxl_pmem_region_type;
+       cxlr_pmem->cxlr = cxlr;
+       cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem);
+
+       return 0;
+}
+
+static void cxlr_pmem_unregister(void *_cxlr_pmem)
+{
+       struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem;
+       struct cxl_region *cxlr = cxlr_pmem->cxlr;
+       struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
+
+       /*
+        * Either the bridge is in ->remove() context under the device_lock(),
+        * or cxlr_release_nvdimm() is cancelling the bridge's release action
+        * for @cxlr_pmem and doing it itself (while manually holding the bridge
+        * lock).
+        */
+       device_lock_assert(&cxl_nvb->dev);
+       cxlr->cxlr_pmem = NULL;
+       cxlr_pmem->cxlr = NULL;
+       device_unregister(&cxlr_pmem->dev);
+}
+
+static void cxlr_release_nvdimm(void *_cxlr)
+{
+       struct cxl_region *cxlr = _cxlr;
+       struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
+
+       scoped_guard(device, &cxl_nvb->dev) {
+               if (cxlr->cxlr_pmem)
+                       devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
+                                           cxlr->cxlr_pmem);
+       }
+       cxlr->cxl_nvb = NULL;
+       put_device(&cxl_nvb->dev);
+}
+
+/**
+ * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
+ * @cxlr: parent CXL region for this pmem region bridge device
+ *
+ * Return: 0 on success negative error code on failure.
+ */
+int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
+{
+       struct cxl_pmem_region *cxlr_pmem;
+       struct cxl_nvdimm_bridge *cxl_nvb;
+       struct device *dev;
+       int rc;
+
+       rc = cxl_pmem_region_alloc(cxlr);
+       if (rc)
+               return rc;
+       cxlr_pmem = cxlr->cxlr_pmem;
+       cxl_nvb = cxlr->cxl_nvb;
+
+       dev = &cxlr_pmem->dev;
+       rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
+       if (rc)
+               goto err;
+
+       rc = device_add(dev);
+       if (rc)
+               goto err;
+
+       dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
+               dev_name(dev));
+
+       scoped_guard(device, &cxl_nvb->dev) {
+               if (cxl_nvb->dev.driver)
+                       rc = devm_add_action_or_reset(&cxl_nvb->dev,
+                                                     cxlr_pmem_unregister,
+                                                     cxlr_pmem);
+               else
+                       rc = -ENXIO;
+       }
+
+       if (rc)
+               goto err_bridge;
+
+       /* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
+       return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr);
+
+err:
+       put_device(dev);
+err_bridge:
+       put_device(&cxl_nvb->dev);
+       cxlr->cxl_nvb = NULL;
+       return rc;
+}
index 53d84a6874b76b0371d599069c2d2ab32f2553fd..f53d79a0566158ad75abc2176e1a8d23be9df6db 100644 (file)
@@ -59,7 +59,7 @@ cxl_core-y += $(CXL_CORE_SRC)/hdm.o
 cxl_core-y += $(CXL_CORE_SRC)/pmu.o
 cxl_core-y += $(CXL_CORE_SRC)/cdat.o
 cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
-cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
+cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o $(CXL_CORE_SRC)/region_pmem.o
 cxl_core-$(CONFIG_CXL_MCE) += $(CXL_CORE_SRC)/mce.o
 cxl_core-$(CONFIG_CXL_FEATURES) += $(CXL_CORE_SRC)/features.o
 cxl_core-$(CONFIG_CXL_EDAC_MEM_FEATURES) += $(CXL_CORE_SRC)/edac.o