	return -ENODEV;
}
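+/*
+ * Polling callback invoked periodically by the EDAC core when the
+ * driver runs in EDAC_OPSTATE_POLL mode.
+ */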
+static void igen6_check(struct mem_ctl_info *mci)
+{
+	struct igen6_imc *imc = mci->pvt_info;
+	u64 ecclog;
+
+	/* errsts_clear() isn't NMI-safe; defer it to the IRQ context */
+	ecclog = ecclog_read_and_clear(imc);
+	if (!ecclog)
+		return;
+
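+	/*
+	 * Stash the error log in the gen_pool and schedule the IRQ work
+	 * to decode and report the error outside this polling callback.
+	 */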
+	if (!ecclog_gen_pool_add(imc->mc, ecclog))
+		irq_work_queue(&ecclog_irq_work);
+}
+
static int igen6_register_mci(int mc, u64 mchbar, struct pci_dev *pdev)
{
	struct edac_mc_layer layers[2];
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = EDAC_MOD_STR;
	mci->dev_name = pci_name(pdev);
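+	/*
+	 * In polling mode, the EDAC core periodically invokes ->edac_check,
+	 * so hook up the igen6 polling routine here.
+	 */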
+	if (edac_op_state == EDAC_OPSTATE_POLL)
+		mci->edac_check = igen6_check;
	mci->pvt_info = &igen6_pvt->imc[mc];
	imc = mci->pvt_info;
	unregister_nmi_handler(NMI_SERR, IGEN6_NMI_NAME);
}
-static void opstate_set(struct res_config *cfg)
+static void opstate_set(struct res_config *cfg, const struct pci_device_id *ent)
{
+	/*
+	 * Quirk: Certain SoCs' error reporting interrupts don't work.
+	 *        Force polling mode for them to ensure that memory error
+	 *        events can be handled.
+	 */
+	if (ent->device == DID_ADL_N_SKU4) {
+		edac_op_state = EDAC_OPSTATE_POLL;
+		return;
+	}
+
	/* Set the mode according to the configuration data. */
	if (cfg->machine_check)
		edac_op_state = EDAC_OPSTATE_INT;
	if (rc)
		goto fail;
-	opstate_set(res_cfg);
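+	/* Pass the PCI device ID so that opstate_set() can apply per-SoC quirks. */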
+	opstate_set(res_cfg, ent);
	for (i = 0; i < res_cfg->num_imc; i++) {
		rc = igen6_register_mci(i, mchbar, pdev);