void cxl_cor_error_detected(struct pci_dev *pdev)
{
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+ struct cxl_memdev *cxlmd = cxlds->cxlmd;
- struct device *dev = &cxlds->cxlmd->dev;
+ struct device *dev = &cxlmd->dev;
scoped_guard(device, dev) {
if (cxlds->rcd)
cxl_handle_rdport_errors(cxlds);
- cxl_handle_cor_ras(&cxlds->cxlmd->dev, cxlds->regs.ras);
+ cxl_handle_cor_ras(dev, cxlmd->endpoint->regs.ras);
}
}
EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, "CXL");
* chance the situation is recoverable, dump the status of the RAS
* capability registers and bounce the active state of the memdev.
*/
- ue = cxl_handle_ras(&cxlds->cxlmd->dev, cxlds->regs.ras);
+ ue = cxl_handle_ras(&cxlmd->dev, cxlmd->endpoint->regs.ras);
}
-
switch (state) {
case pci_channel_io_normal:
if (ue) {
* @dev: The device associated with this CXL state
* @cxlmd: The device representing the CXL.mem capabilities of @dev
* @reg_map: component and ras register mapping parameters
- * @regs: Parsed register blocks
+ * @regs: Mapped CXL Device registers (CXL 3.0 8.2.8)
* @cxl_dvsec: Offset to the PCIe device DVSEC
* @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
* @media_ready: Indicate whether the device media is usable
struct device *dev;
struct cxl_memdev *cxlmd;
struct cxl_register_map reg_map;
- struct cxl_regs regs;
+ struct cxl_device_regs regs;
int cxl_dvsec;
bool rcd;
bool media_ready;
return cxl_setup_regs(map);
}
-static int cxl_pci_ras_unmask(struct pci_dev *pdev)
-{
- struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
- void __iomem *addr;
- u32 orig_val, val, mask;
- u16 cap;
- int rc;
-
- if (!cxlds->regs.ras) {
- dev_dbg(&pdev->dev, "No RAS registers.\n");
- return 0;
- }
-
- /* BIOS has PCIe AER error control */
- if (!pcie_aer_is_native(pdev))
- return 0;
-
- rc = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap);
- if (rc)
- return rc;
-
- if (cap & PCI_EXP_DEVCTL_URRE) {
- addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_MASK_OFFSET;
- orig_val = readl(addr);
-
- mask = CXL_RAS_UNCORRECTABLE_MASK_MASK |
- CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK;
- val = orig_val & ~mask;
- writel(val, addr);
- dev_dbg(&pdev->dev,
- "Uncorrectable RAS Errors Mask: %#x -> %#x\n",
- orig_val, val);
- }
-
- if (cap & PCI_EXP_DEVCTL_CERE) {
- addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_MASK_OFFSET;
- orig_val = readl(addr);
- val = orig_val & ~CXL_RAS_CORRECTABLE_MASK_MASK;
- writel(val, addr);
- dev_dbg(&pdev->dev, "Correctable RAS Errors Mask: %#x -> %#x\n",
- orig_val, val);
- }
-
- return 0;
-}
-
static void free_event_buf(void *buf)
{
kvfree(buf);
unsigned int i;
bool irq_avail;
- /*
- * Double check the anonymous union trickery in struct cxl_regs
- * FIXME switch to struct_group()
- */
- BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
- offsetof(struct cxl_regs, device_regs.memdev));
-
rc = pcim_enable_device(pdev);
if (rc)
return rc;
if (rc)
return rc;
- rc = cxl_map_device_regs(&map, &cxlds->regs.device_regs);
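+ /* cxlds->regs now maps only the Device registers; RAS lives on the port */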
+ rc = cxl_map_device_regs(&map, &cxlds->regs);
if (rc)
return rc;
else if (!cxlds->reg_map.component_map.ras.valid)
dev_dbg(&pdev->dev, "RAS registers not found\n");
- rc = cxl_map_component_regs(&cxlds->reg_map, &cxlds->regs.component,
- BIT(CXL_CM_CAP_CAP_ID_RAS));
- if (rc)
- dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");
-
rc = cxl_pci_type3_init_mailbox(cxlds);
if (rc)
return rc;
if (rc)
return rc;
- if (cxl_pci_ras_unmask(pdev))
- dev_dbg(&pdev->dev, "No RAS reporting unmasked\n");
-
pci_save_state(pdev);
return rc;
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
+#include <linux/aer.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
return 0;
}
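+/*
+ * Clear the endpoint port's RAS mask registers so errors are reported,
+ * but only when the OS owns AER (native error handling) and the
+ * matching PCIe Device Control reporting enables are set.
+ */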
+static int cxl_ras_unmask(struct cxl_port *port)
+{
+ struct pci_dev *pdev;
+ void __iomem *addr;
+ u32 orig_val, val, mask;
+ u16 cap;
+ int rc;
+
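+ /* Only a PCI uport has AER ownership and Device Control state to consult */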
+ if (!dev_is_pci(port->uport_dev))
+ return 0;
+ pdev = to_pci_dev(port->uport_dev);
+
+ if (!port->regs.ras) {
+ pci_dbg(pdev, "No RAS registers.\n");
+ return 0;
+ }
+
+ /* BIOS has PCIe AER error control */
+ if (!pcie_aer_is_native(pdev))
+ return 0;
+
+ rc = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap);
+ if (rc)
+ return rc;
+
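+ /* DEVCTL URRE set: clear the uncorrectable (and F256B) error masks */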
+ if (cap & PCI_EXP_DEVCTL_URRE) {
+ addr = port->regs.ras + CXL_RAS_UNCORRECTABLE_MASK_OFFSET;
+ orig_val = readl(addr);
+
+ mask = CXL_RAS_UNCORRECTABLE_MASK_MASK |
+ CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK;
+ val = orig_val & ~mask;
+ writel(val, addr);
+ pci_dbg(pdev, "Uncorrectable RAS Errors Mask: %#x -> %#x\n",
+ orig_val, val);
+ }
+
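+ /* DEVCTL CERE set: likewise clear the correctable error mask */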
+ if (cap & PCI_EXP_DEVCTL_CERE) {
+ addr = port->regs.ras + CXL_RAS_CORRECTABLE_MASK_OFFSET;
+ orig_val = readl(addr);
+ val = orig_val & ~CXL_RAS_CORRECTABLE_MASK_MASK;
+ writel(val, addr);
+ pci_dbg(pdev, "Correctable RAS Errors Mask: %#x -> %#x\n",
+ orig_val, val);
+ }
+
+ return 0;
+}
+
static int cxl_endpoint_port_probe(struct cxl_port *port)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
if (dport->rch)
devm_cxl_dport_rch_ras_setup(dport);
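+ /* Map this port's RAS registers before unmasking error reporting */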
+ devm_cxl_port_ras_setup(port);
+ if (cxl_ras_unmask(port))
+ dev_dbg(&port->dev, "failed to unmask RAS error reporting\n");
+
/*
* Now that all endpoint decoders are successfully enumerated, try to
* assemble regions from committed decoders