cxl: Remove core/acpi.c and cxl core dependency on ACPI
author Robert Richter <rrichter@amd.com>
Fri, 11 Jul 2025 15:15:27 +0000 (17:15 +0200)
committer Dave Jiang <dave.jiang@intel.com>
Tue, 15 Jul 2025 14:51:54 +0000 (07:51 -0700)
From Dave [1]:

"""
It was a mistake to introduce core/acpi.c and putting ACPI dependency on
cxl_core when adding the extended linear cache support.
"""

The current implementation calls hmat_get_extended_linear_cache_size()
from the ACPI subsystem. That external reference causes an issue when
running cxl_test, as there is no way to "mock" the function and ignore
it when using cxl_test.

Instead of working around that with cxlrd ops and extensively
expanding the cxl_test code [1], just move the HMAT calls out of the
core module and into cxl_acpi. Implement this by adding a @cache_size
member to struct cxl_root_decoder. During initialization the cache
size is determined and stored in the root decoder object in cxl_acpi.
Later on, in cxl_core, the @cache_size parameter is used to set up
extended linear caching.

[1] https://patch.msgid.link/20250610172938.139428-1-dave.jiang@intel.com
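
At a glance, the resulting split looks like this. This is only a
condensed sketch of the hunks below (function bodies trimmed and
paraphrased), not additional code introduced by the patch:

    /* cxl_acpi, drivers/cxl/acpi.c: query HMAT once while parsing the CFMWS */
    static void cxl_setup_extended_linear_cache(struct cxl_root_decoder *cxlrd)
    {
            /* cxl_acpi_set_cache_size() calls hmat_get_extended_linear_cache_size() */
            if (cxl_acpi_set_cache_size(cxlrd))
                    cxlrd->cache_size = 0;  /* no cache present or lookup failed */
    }

    /* cxl_core, drivers/cxl/core/region.c: consume the stored value, no ACPI call */
    static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr,
                                                struct resource *res)
    {
            struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
            resource_size_t cache_size = cxlrd->cache_size;

            if (!cache_size)
                    return 0;
            /* ... rest of the resize logic is unchanged by this patch ... */
    }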

[ dj: Remove core/acpi.o from tools/testing/cxl/Kbuild ]
[ dj: Add kdoc for cxlrd->cache_size ]

Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Robert Richter <rrichter@amd.com>
Reviewed-by: Alison Schofield <alison.schofield@intel.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Link: https://patch.msgid.link/20250711151529.787470-1-rrichter@amd.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
drivers/cxl/acpi.c
drivers/cxl/core/Makefile
drivers/cxl/core/acpi.c [deleted file]
drivers/cxl/core/core.h
drivers/cxl/core/region.c
drivers/cxl/cxl.h
tools/testing/cxl/Kbuild

index a1a99ec3f12cdc3efb0ccf40ce795de30cd284c1..712624cba2b6e020f38b924681c77e2d1472a96c 100644 (file)
@@ -335,6 +335,63 @@ static int add_or_reset_cxl_resource(struct resource *parent, struct resource *r
        return rc;
 }
 
+static int cxl_acpi_set_cache_size(struct cxl_root_decoder *cxlrd)
+{
+       struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+       struct range *hpa = &cxld->hpa_range;
+       resource_size_t size = range_len(hpa);
+       resource_size_t start = hpa->start;
+       resource_size_t cache_size;
+       struct resource res;
+       int nid, rc;
+
+       res = DEFINE_RES(start, size, 0);
+       nid = phys_to_target_node(start);
+
+       rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size);
+       if (rc)
+               return rc;
+
+       /*
+        * The cache range is expected to be within the CFMWS.
+        * Currently only cache_size == cxl_size is supported, where the
+        * CXL size is half of the total CFMWS window size.
+        */
+       size = size >> 1;
+       if (cache_size && size != cache_size) {
+               dev_warn(&cxld->dev,
+                        "Extended Linear Cache size %pa != CXL size %pa. No Support!",
+                        &cache_size, &size);
+               return -ENXIO;
+       }
+
+       cxlrd->cache_size = cache_size;
+
+       return 0;
+}
+
+static void cxl_setup_extended_linear_cache(struct cxl_root_decoder *cxlrd)
+{
+       int rc;
+
+       rc = cxl_acpi_set_cache_size(cxlrd);
+       if (!rc)
+               return;
+
+       if (rc != -EOPNOTSUPP) {
+               /*
+                * Failing to support extended linear cache region resize does not
+                * prevent the region from functioning. It only causes cxl list
+                * to show an incorrect region size.
+                */
+               dev_warn(cxlrd->cxlsd.cxld.dev.parent,
+                        "Extended linear cache calculation failed rc:%d\n", rc);
+       }
+
+       /* Ignoring return code */
+       cxlrd->cache_size = 0;
+}
+
 DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
            if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
 DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
@@ -394,6 +451,8 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
                ig = CXL_DECODER_MIN_GRANULARITY;
        cxld->interleave_granularity = ig;
 
+       cxl_setup_extended_linear_cache(cxlrd);
+
        if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
                if (ways != 1 && ways != 3) {
                        cxims_ctx = (struct cxl_cxims_context) {
index 79e2ef81fde8253be0d2b0882fef47c77a8f328a..5ad8fef210b5c8a871ce06d9f13a0c0f7410cfdc 100644 (file)
@@ -15,7 +15,6 @@ cxl_core-y += hdm.o
 cxl_core-y += pmu.o
 cxl_core-y += cdat.o
 cxl_core-y += ras.o
-cxl_core-y += acpi.o
 cxl_core-$(CONFIG_TRACING) += trace.o
 cxl_core-$(CONFIG_CXL_REGION) += region.o
 cxl_core-$(CONFIG_CXL_MCE) += mce.o
diff --git a/drivers/cxl/core/acpi.c b/drivers/cxl/core/acpi.c
deleted file mode 100644 (file)
index f13b4da..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2024 Intel Corporation. All rights reserved. */
-#include <linux/acpi.h>
-#include "cxl.h"
-#include "core.h"
-
-int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
-                                           int nid, resource_size_t *size)
-{
-       return hmat_get_extended_linear_cache_size(backing_res, nid, size);
-}
index 6b78b10da3e185782d626b65523f79a590cc0a67..2250c05cecc38f79d5a5f1dac3848d676f214f8f 100644 (file)
@@ -121,8 +121,6 @@ int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
 int cxl_ras_init(void);
 void cxl_ras_exit(void);
 int cxl_gpf_port_setup(struct cxl_dport *dport);
-int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
-                                           int nid, resource_size_t *size);
 
 #ifdef CONFIG_CXL_FEATURES
 struct cxl_feat_entry *
index 91ff3a495fbd1407e1b3487713f49d1e681ac4d1..08ac7f4835628660d338a2084ac8281c791274e6 100644 (file)
@@ -3282,15 +3282,10 @@ static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr,
 {
        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
        struct cxl_region_params *p = &cxlr->params;
-       int nid = phys_to_target_node(res->start);
        resource_size_t size = resource_size(res);
        resource_size_t cache_size, start;
-       int rc;
-
-       rc = cxl_acpi_get_extended_linear_cache_size(res, nid, &cache_size);
-       if (rc)
-               return rc;
 
+       cache_size = cxlrd->cache_size;
        if (!cache_size)
                return 0;
 
index e7b66ca1d4230d684e902319975fc14bfd149294..0730f92df03805ce59c65dc21842c4cedff8649c 100644 (file)
@@ -423,6 +423,7 @@ typedef u64 (*cxl_hpa_to_spa_fn)(struct cxl_root_decoder *cxlrd, u64 hpa);
 /**
  * struct cxl_root_decoder - Static platform CXL address decoder
  * @res: host / parent resource for region allocations
+ * @cache_size: extended linear cache size if exists, otherwise zero.
  * @region_id: region id for next region provisioning event
  * @hpa_to_spa: translate CXL host-physical-address to Platform system-physical-address
  * @platform_data: platform specific configuration data
@@ -432,6 +433,7 @@ typedef u64 (*cxl_hpa_to_spa_fn)(struct cxl_root_decoder *cxlrd, u64 hpa);
  */
 struct cxl_root_decoder {
        struct resource *res;
+       resource_size_t cache_size;
        atomic_t region_id;
        cxl_hpa_to_spa_fn hpa_to_spa;
        void *platform_data;
index 31a2d73c963f3e2075e8f6f22dd4a93fd2a36791..d07f14cb7aa4537ecab1ae38129a904560b69aa6 100644 (file)
@@ -62,7 +62,6 @@ cxl_core-y += $(CXL_CORE_SRC)/hdm.o
 cxl_core-y += $(CXL_CORE_SRC)/pmu.o
 cxl_core-y += $(CXL_CORE_SRC)/cdat.o
 cxl_core-y += $(CXL_CORE_SRC)/ras.o
-cxl_core-y += $(CXL_CORE_SRC)/acpi.o
 cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
 cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
 cxl_core-$(CONFIG_CXL_MCE) += $(CXL_CORE_SRC)/mce.o