cxl/region: check interleave capability
author Yao Xingtao <yaoxt.fnst@fujitsu.com>
Fri, 14 Jun 2024 08:47:54 +0000 (04:47 -0400)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 5 Jul 2024 07:34:07 +0000 (09:34 +0200)
[ Upstream commit 84328c5acebc10c8cdcf17283ab6c6d548885bfc ]

Since the interleave capability is not verified, if the interleave
capability of a target does not match what the region requires, the
mismatch is only detected when the decoder is committed, where it fails
at the device end.

In order to catch this error as early as possible, the driver needs to
check the interleave capability of a target while attaching it to a
region.

Per CXL specification r3.1 (8.2.4.20.1 CXL HDM Decoder Capability
Register), bit 11 indicates the capability to establish interleaving in
3, 6 and 12 ways, and bit 12 indicates 16-way interleaving. If these
bits are not set, the target cannot be attached to a region using such
interleave ways.

Additionally, bits 8 and 9 indicate which address bits are capable of
being used for interleaving; Linux tracks this per port in the cxl_hdm
interleave_mask.
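
As an illustration (the device below is hypothetical, not part of this
patch), a port whose HDM Decoder Capability register sets bits 8 and 11
but clears bits 9 and 12 is decoded by parse_hdm_decoder_caps() below
into:

  interleave_mask = GENMASK(11, 8);            /* only HPA bits 11:8 may interleave */
  iw_cap_mask     = BIT(1) | BIT(2) | BIT(3) |
                    BIT(4) | BIT(6) | BIT(8) |
                    BIT(12);                   /* no 16-way support */

so a 16-way region, or any region whose interleave bits reach above HPA
bit 11, is now rejected at attach time instead of at commit.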

Per CXL specification r3.1 (8.2.4.20.13 Decoder Protection):
  eIW means encoded Interleave Ways.
  eIG means encoded Interleave Granularity.

  in HPA:
  if eIW is 0 or 8 (interleave ways: 1, 3), all the bits of HPA are used,
  there are no interleave bits, and the following check is skipped.

  if eIW is less than 8 (interleave ways: 2, 4, 8, 16), the interleave bits
  start at bit position eIG + 8 and end at eIG + eIW + 8 - 1.

  if eIW is greater than 8 (interleave ways: 6, 12), the interleave bits
  start at bit position eIG + 8 and end at eIG + eIW - 1.

  if the interleave mask is insufficient to cover the required interleave
  bits, the target cannot be attached to the region.
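
To make the bit positions concrete, the following standalone sketch
(illustrative only, not part of the patch; the decoded eIW/eIG values
are arbitrary) prints the interleave-bit span implied by the rules
above:

  /* Illustrative sketch: print the HPA interleave-bit span for a
   * decoded eIW/eIG pair, following the rules quoted above. */
  #include <stdio.h>

  static void interleave_span(unsigned int eiw, unsigned int eig)
  {
          if (eiw == 0 || eiw == 8) {
                  /* 1-way or 3-way: no dedicated interleave bits */
                  printf("eIW=%u eIG=%u: no interleave bits\n", eiw, eig);
                  return;
          }
          printf("eIW=%u eIG=%u: HPA bits [%u:%u]\n", eiw, eig,
                 eiw > 8 ? eig + eiw - 1 : eig + eiw + 7, eig + 8);
  }

  int main(void)
  {
          interleave_span(2, 1);  /* 4 ways @ 512B -> HPA[10:9] */
          interleave_span(9, 0);  /* 6 ways @ 256B -> HPA[8:8]  */
          return 0;
  }

If the port's interleave_mask does not cover that span, the new
check_interleave_cap() below rejects the attach with -ENXIO.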

Fixes: 384e624bb211 ("cxl/region: Attach endpoint decoders")
Signed-off-by: Yao Xingtao <yaoxt.fnst@fujitsu.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://patch.msgid.link/20240614084755.59503-2-yaoxt.fnst@fujitsu.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/cxl/core/hdm.c
drivers/cxl/core/region.c
drivers/cxl/cxl.h
drivers/cxl/cxlmem.h
tools/testing/cxl/test/cxl.c

diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 90664659d5fab8cea8dbb9e4ff1778ede78d70a7..3600b7cbfb5893b55b57b1dce74acade7ead27e1 100644
@@ -52,6 +52,14 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
        struct cxl_dport *dport = NULL;
        int single_port_map[1];
        unsigned long index;
+       struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+
+       /*
+        * Capability checks are moot for passthrough decoders, support
+        * any and all possibilities.
+        */
+       cxlhdm->interleave_mask = ~0U;
+       cxlhdm->iw_cap_mask = ~0UL;
 
        cxlsd = cxl_switch_decoder_alloc(port, 1);
        if (IS_ERR(cxlsd))
@@ -79,6 +87,11 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
                cxlhdm->interleave_mask |= GENMASK(11, 8);
        if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
                cxlhdm->interleave_mask |= GENMASK(14, 12);
+       cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8);
+       if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap))
+               cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12);
+       if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap))
+               cxlhdm->iw_cap_mask |= BIT(16);
 }
 
 static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 0d59af19ecee73de46236fafce3f5f4d4ba53e34..bc5a43897d578345bf46a66e02542a5bf2f12724 100644
@@ -997,6 +997,26 @@ static int cxl_port_attach_region(struct cxl_port *port,
        }
        cxld = cxl_rr->decoder;
 
+       /*
+        * the number of targets should not exceed the target_count
+        * of the decoder
+        */
+       if (is_switch_decoder(&cxld->dev)) {
+               struct cxl_switch_decoder *cxlsd;
+
+               cxlsd = to_cxl_switch_decoder(&cxld->dev);
+               if (cxl_rr->nr_targets > cxlsd->nr_targets) {
+                       dev_dbg(&cxlr->dev,
+                               "%s:%s %s add: %s:%s @ %d overflows targets: %d\n",
+                               dev_name(port->uport_dev), dev_name(&port->dev),
+                               dev_name(&cxld->dev), dev_name(&cxlmd->dev),
+                               dev_name(&cxled->cxld.dev), pos,
+                               cxlsd->nr_targets);
+                       rc = -ENXIO;
+                       goto out_erase;
+               }
+       }
+
        rc = cxl_rr_ep_add(cxl_rr, cxled);
        if (rc) {
                dev_dbg(&cxlr->dev,
@@ -1106,6 +1126,50 @@ static int check_last_peer(struct cxl_endpoint_decoder *cxled,
        return 0;
 }
 
+static int check_interleave_cap(struct cxl_decoder *cxld, int iw, int ig)
+{
+       struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+       struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+       unsigned int interleave_mask;
+       u8 eiw;
+       u16 eig;
+       int high_pos, low_pos;
+
+       if (!test_bit(iw, &cxlhdm->iw_cap_mask))
+               return -ENXIO;
+       /*
+        * Per CXL specification r3.1(8.2.4.20.13 Decoder Protection),
+        * if eiw < 8:
+        *   DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + 8 + eiw]
+        *   DPAOFFSET[eig + 7: 0]  = HPAOFFSET[eig + 7: 0]
+        *
+        *   when the eiw is 0, all the bits of HPAOFFSET[51: 0] are used, the
+        *   interleave bits are none.
+        *
+        * if eiw >= 8:
+        *   DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + eiw] / 3
+        *   DPAOFFSET[eig + 7: 0]  = HPAOFFSET[eig + 7: 0]
+        *
+        *   when the eiw is 8, all the bits of HPAOFFSET[51: 0] are used, the
+        *   interleave bits are none.
+        */
+       ways_to_eiw(iw, &eiw);
+       if (eiw == 0 || eiw == 8)
+               return 0;
+
+       granularity_to_eig(ig, &eig);
+       if (eiw > 8)
+               high_pos = eiw + eig - 1;
+       else
+               high_pos = eiw + eig + 7;
+       low_pos = eig + 8;
+       interleave_mask = GENMASK(high_pos, low_pos);
+       if (interleave_mask & ~cxlhdm->interleave_mask)
+               return -ENXIO;
+
+       return 0;
+}
+
 static int cxl_port_setup_targets(struct cxl_port *port,
                                  struct cxl_region *cxlr,
                                  struct cxl_endpoint_decoder *cxled)
@@ -1256,6 +1320,15 @@ static int cxl_port_setup_targets(struct cxl_port *port,
                        return -ENXIO;
                }
        } else {
+               rc = check_interleave_cap(cxld, iw, ig);
+               if (rc) {
+                       dev_dbg(&cxlr->dev,
+                               "%s:%s iw: %d ig: %d is not supported\n",
+                               dev_name(port->uport_dev),
+                               dev_name(&port->dev), iw, ig);
+                       return rc;
+               }
+
                cxld->interleave_ways = iw;
                cxld->interleave_granularity = ig;
                cxld->hpa_range = (struct range) {
@@ -1692,6 +1765,15 @@ static int cxl_region_attach(struct cxl_region *cxlr,
        struct cxl_dport *dport;
        int rc = -ENXIO;
 
+       rc = check_interleave_cap(&cxled->cxld, p->interleave_ways,
+                                 p->interleave_granularity);
+       if (rc) {
+               dev_dbg(&cxlr->dev, "%s iw: %d ig: %d is not supported\n",
+                       dev_name(&cxled->cxld.dev), p->interleave_ways,
+                       p->interleave_granularity);
+               return rc;
+       }
+
        if (cxled->mode != cxlr->mode) {
                dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
                        dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index de2c250c894b10ae0f7fa21aa6d5c798c4162ee0..bb3ad219b6b3162e472ad2ec943c643726ca5e89 100644
@@ -43,6 +43,8 @@
 #define   CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4)
 #define   CXL_HDM_DECODER_INTERLEAVE_11_8 BIT(8)
 #define   CXL_HDM_DECODER_INTERLEAVE_14_12 BIT(9)
+#define   CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY BIT(11)
+#define   CXL_HDM_DECODER_INTERLEAVE_16_WAY BIT(12)
 #define CXL_HDM_DECODER_CTRL_OFFSET 0x4
 #define   CXL_HDM_DECODER_ENABLE BIT(1)
 #define CXL_HDM_DECODER0_BASE_LOW_OFFSET(i) (0x20 * (i) + 0x10)
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 6933bc20e76b6b33a32994ae322cbf06c4c78540..59c7f88b915a4302e322f880c8ffaa22159590d8 100644
@@ -888,11 +888,21 @@ static inline void cxl_mem_active_dec(void)
 
 int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd);
 
+/**
+ * struct cxl_hdm - HDM Decoder registers and cached / decoded capabilities
+ * @regs: mapped registers, see devm_cxl_setup_hdm()
+ * @decoder_count: number of decoders for this port
+ * @target_count: for switch decoders, max downstream port targets
+ * @interleave_mask: interleave granularity capability, see check_interleave_cap()
+ * @iw_cap_mask: bitmask of supported interleave ways, see check_interleave_cap()
+ * @port: mapped cxl_port, see devm_cxl_setup_hdm()
+ */
 struct cxl_hdm {
        struct cxl_component_regs regs;
        unsigned int decoder_count;
        unsigned int target_count;
        unsigned int interleave_mask;
+       unsigned long iw_cap_mask;
        struct cxl_port *port;
 };
 
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index f4e517a0c7740ffa2dfb4889231d42fad438a5a9..8251718eaf3a8eae417cd4aca43aff734df443d1 100644
@@ -624,11 +624,15 @@ static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
                                          struct cxl_endpoint_dvsec_info *info)
 {
        struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
+       struct device *dev = &port->dev;
 
        if (!cxlhdm)
                return ERR_PTR(-ENOMEM);
 
        cxlhdm->port = port;
+       cxlhdm->interleave_mask = ~0U;
+       cxlhdm->iw_cap_mask = ~0UL;
+       dev_set_drvdata(dev, cxlhdm);
        return cxlhdm;
 }