return 0;
}
+/*
+ * PCI error-handler .reset_prepare callback: called before the VF is reset.
+ * Claims the PF QM's QM_RESETTING bit so that the VF reset is serialized
+ * against other reset users of the same PF QM, then records ownership in
+ * set_reset_flag so reset_done knows to release the bit.
+ */
+static void hisi_acc_vf_pci_reset_prepare(struct pci_dev *pdev)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
+ struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
+ struct device *dev = &qm->pdev->dev;
+ u32 delay = 0;
+
+ /* All reset requests need to be queued for processing */
+ while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
+ /*
+  * Sleep ~1ms per attempt; give up after QM_RESET_WAIT_TIMEOUT
+  * attempts (~60s -- msleep(1) may sleep longer than 1ms, so this
+  * is a lower bound on the wall-clock wait).
+  */
+ msleep(1);
+ if (++delay > QM_RESET_WAIT_TIMEOUT) {
+ /* Timed out: proceed without the bit; reset_done will not clear it */
+ dev_err(dev, "reset prepare failed\n");
+ return;
+ }
+ }
+
+ /*
+  * Mark that we own QM_RESETTING so reset_done clears it.
+  * NOTE(review): set_reset_flag is never set back to false in the code
+  * visible here -- confirm it is cleared elsewhere, otherwise a later
+  * reset_done without a matching reset_prepare would clear a
+  * QM_RESETTING bit owned by someone else.
+  */
+ hisi_acc_vdev->set_reset_flag = true;
+}
+
static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
+ struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
+
+ if (hisi_acc_vdev->set_reset_flag)
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
if (hisi_acc_vdev->core_device.vdev.migration_flags !=
VFIO_MIGRATION_STOP_COPY)
MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);
static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
+ /* Take QM_RESETTING before VF reset; dropped again in .reset_done */
+ .reset_prepare = hisi_acc_vf_pci_reset_prepare,
.reset_done = hisi_acc_vf_pci_aer_reset_done,
.error_detected = vfio_pci_core_aer_err_detected,
};
#define ERROR_CHECK_TIMEOUT 100
#define CHECK_DELAY_TIME 100
+/*
+ * Max number of ~1ms sleep iterations reset_prepare waits to acquire
+ * QM_RESETTING (~60s minimum wall-clock bound).
+ */
+#define QM_RESET_WAIT_TIMEOUT 60000
#define QM_SQC_VFT_BASE_SHIFT_V2 28
#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
struct hisi_acc_vf_core_device {
struct vfio_pci_core_device core_device;
u8 match_done;
+ bool set_reset_flag;
/*
* io_base is only valid when dev_opened is true,
* which is protected by open_mutex.