git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
cxl/region: Translate HPA to DPA and memdev in unaligned regions
author: Alison Schofield <alison.schofield@intel.com>
Fri, 16 Jan 2026 04:58:37 +0000 (20:58 -0800)
committer: Dave Jiang <dave.jiang@intel.com>
Thu, 22 Jan 2026 23:58:14 +0000 (16:58 -0700)
The CXL driver supports an expert user debugfs interface to inject and
clear poison by a region offset. That feature requires translating a
HPA (the region address) to a DPA and a memdev to perform the poison
operation.

Unaligned regions do not have an algebraically invertible mapping
from HPA to DPA due to the region offset skew. The region base is not
aligned to a full interleave. Add a helper to perform the unaligned
translations that first calculates the DPA offset and then tests it
against each candidate endpoint decoder.

Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Alison Schofield <alison.schofield@intel.com>
Link: https://patch.msgid.link/f338b7aff7e4574fcc525b1a0d4f09786bfb6489.1768538962.git.alison.schofield@intel.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
drivers/cxl/core/region.c

index cdfa454b940db91ed9189313268ae61f3a3ca6d8..d5979000fba12a1136b8ab3b46f44ffeb2217a57 100644 (file)
@@ -3314,6 +3314,48 @@ struct dpa_result {
        u64 dpa;
 };
 
+/*
+ * unaligned_region_offset_to_dpa_result() - resolve a region offset in an
+ * unaligned (MOD3) region to a device physical address and its memdev.
+ * @cxlr: region whose HPA base is not aligned to a full interleave
+ * @offset: byte offset from the region base (p->res->start)
+ * @result: out parameter receiving the owning memdev and absolute DPA
+ *
+ * Returns 0 on success, -ENXIO if no endpoint decoder translates back
+ * to the requested HPA.
+ */
+static int unaligned_region_offset_to_dpa_result(struct cxl_region *cxlr,
+                                                u64 offset,
+                                                struct dpa_result *result)
+{
+       struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+       struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+       struct cxl_region_params *p = &cxlr->params;
+       u64 interleave_width, interleave_index;
+       u64 gran, gran_offset, dpa_offset;
+       u64 hpa = p->res->start + offset;
+
+       /*
+        * Unaligned addresses are not algebraically invertible. Calculate
+        * a dpa_offset independent of the target device and then enumerate
+        * and test that dpa_offset against each candidate endpoint decoder.
+        */
+       gran = cxld->interleave_granularity;
+       interleave_width = gran * cxld->interleave_ways;
+       /* Which full interleave stripe @offset falls in */
+       interleave_index = div64_u64(offset, interleave_width);
+       /*
+        * div64_u64_rem() returns the quotient and stores the remainder
+        * through its third argument; the remainder is wanted here, and
+        * passing NULL would be dereferenced unconditionally.
+        */
+       div64_u64_rem(offset, gran, &gran_offset);
+
+       dpa_offset = interleave_index * gran + gran_offset;
+
+       for (int i = 0; i < p->nr_targets; i++) {
+               struct cxl_endpoint_decoder *cxled = p->targets[i];
+               int pos = cxled->pos;
+               u64 test_hpa;
+
+               /* Accept the decoder whose forward translation round-trips */
+               test_hpa = unaligned_dpa_to_hpa(cxld, p, pos, dpa_offset);
+               if (test_hpa == hpa) {
+                       result->cxlmd = cxled_to_memdev(cxled);
+                       result->dpa =
+                               cxl_dpa_resource_start(cxled) + dpa_offset;
+                       return 0;
+               }
+       }
+       dev_err(&cxlr->dev,
+               "failed to resolve HPA %#llx in unaligned MOD3 region\n", hpa);
+
+       return -ENXIO;
+}
+
 static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
                                       struct dpa_result *result)
 {
@@ -3343,6 +3385,10 @@ static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
                hpa_offset = offset;
        }
 
+       if (region_is_unaligned_mod3(cxlr))
+               return unaligned_region_offset_to_dpa_result(cxlr, offset,
+                                                            result);
+
        pos = cxl_calculate_position(hpa_offset, eiw, eig);
        if (pos < 0 || pos >= p->nr_targets) {
                dev_dbg(&cxlr->dev, "Invalid position %d for %d targets\n",