return 0;
}
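+/*
+ * Select the EQC/AEQC register offsets that match the migration
+ * control mode: the VF BAR2 offsets in HW_ACC_MIG_VF_CTRL mode, or
+ * the PF BAR2 offsets in HW_ACC_MIG_PF_CTRL mode.
+ */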
+static void qm_xqc_reg_offsets(struct hisi_qm *qm,
+ u32 *eqc_addr, u32 *aeqc_addr)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev =
+ container_of(qm, struct hisi_acc_vf_core_device, vf_qm);
+
+ if (hisi_acc_vdev->drv_mode == HW_ACC_MIG_VF_CTRL) {
+ *eqc_addr = QM_EQC_VF_DW0;
+ *aeqc_addr = QM_AEQC_VF_DW0;
+ } else {
+ *eqc_addr = QM_EQC_PF_DW0;
+ *aeqc_addr = QM_AEQC_PF_DW0;
+ }
+}
+
static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
struct device *dev = &qm->pdev->dev;
+ u32 eqc_addr, aeqc_addr;
int ret;
ret = qm_read_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
return ret;
}
+ qm_xqc_reg_offsets(qm, &eqc_addr, &aeqc_addr);
/* QM_EQC_DW has 7 regs */
- ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
+ ret = qm_read_regs(qm, eqc_addr, vf_data->qm_eqc_dw, 7);
if (ret) {
dev_err(dev, "failed to read QM_EQC_DW\n");
return ret;
}
/* QM_AEQC_DW has 7 regs */
- ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
+ ret = qm_read_regs(qm, aeqc_addr, vf_data->qm_aeqc_dw, 7);
if (ret) {
dev_err(dev, "failed to read QM_AEQC_DW\n");
return ret;
static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
struct device *dev = &qm->pdev->dev;
+ u32 eqc_addr, aeqc_addr;
int ret;
/* Check VF state */
return ret;
}
+ qm_xqc_reg_offsets(qm, &eqc_addr, &aeqc_addr);
/* QM_EQC_DW has 7 regs */
- ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
+ ret = qm_write_regs(qm, eqc_addr, vf_data->qm_eqc_dw, 7);
if (ret) {
dev_err(dev, "failed to write QM_EQC_DW\n");
return ret;
}
/* QM_AEQC_DW has 7 regs */
- ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
+ ret = qm_write_regs(qm, aeqc_addr, vf_data->qm_aeqc_dw, 7);
if (ret) {
dev_err(dev, "failed to write QM_AEQC_DW\n");
return ret;
{
struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+ struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
struct pci_dev *vf_dev = vdev->pdev;
+ u32 val;
- /*
- * ACC VF dev BAR2 region consists of both functional register space
- * and migration control register space. For migration to work, we
- * need access to both. Hence, we map the entire BAR2 region here.
- * But unnecessarily exposing the migration BAR region to the Guest
- * has the potential to prevent/corrupt the Guest migration. Hence,
- * we restrict access to the migration control space from
- * Guest(Please see mmap/ioctl/read/write override functions).
- *
- * Please note that it is OK to expose the entire VF BAR if migration
- * is not supported or required as this cannot affect the ACC PF
- * configurations.
- *
- * Also the HiSilicon ACC VF devices supported by this driver on
- * HiSilicon hardware platforms are integrated end point devices
- * and the platform lacks the capability to perform any PCIe P2P
- * between these devices.
- */
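+ /*
+ * Detect which component owns the migration control registers.
+ * On hardware newer than QM_HW_V3 the PF can expose them in its
+ * BAR2 migration region; QM_MIG_REGION_SEL reports whether that
+ * mode is enabled. Only read the register on such hardware.
+ */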
+ hisi_acc_vdev->drv_mode = HW_ACC_MIG_VF_CTRL;
+ if (pf_qm->ver > QM_HW_V3) {
+ val = readl(pf_qm->io_base + QM_MIG_REGION_SEL);
+ if (val & QM_MIG_REGION_EN)
+ hisi_acc_vdev->drv_mode = HW_ACC_MIG_PF_CTRL;
+ }
- vf_qm->io_base =
- ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
- pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
- if (!vf_qm->io_base)
- return -EIO;
+ if (hisi_acc_vdev->drv_mode == HW_ACC_MIG_PF_CTRL) {
+ /*
+ * On hardware platforms newer than QM_HW_V3, the migration
+ * control registers are placed in the PF's BAR2 configuration
+ * region, and each VF device occupies an 8KB window of it.
+ */
+ vf_qm->io_base = pf_qm->io_base + QM_MIG_REGION_OFFSET +
+ hisi_acc_vdev->vf_id * QM_MIG_REGION_SIZE;
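+ /*
+ * io_base points into the PF's existing mapping here, so the
+ * close path must not iounmap() it.
+ */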
+ } else {
+ /*
+ * ACC VF dev BAR2 region consists of both functional register space
+ * and migration control register space. For migration to work, we
+ * need access to both. Hence, we map the entire BAR2 region here.
+ * But unnecessarily exposing the migration BAR region to the Guest
+ * has the potential to prevent/corrupt the Guest migration. Hence,
+ * we restrict access to the migration control space from
+ * Guest (please see the mmap/ioctl/read/write override functions).
+ *
+ * Please note that it is OK to expose the entire VF BAR if migration
+ * is not supported or required as this cannot affect the ACC PF
+ * configurations.
+ *
+ * Also the HiSilicon ACC VF devices supported by this driver on
+ * HiSilicon hardware platforms are integrated end point devices
+ * and the platform lacks the capability to perform any PCIe P2P
+ * between these devices.
+ */
+ vf_qm->io_base =
+ ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
+ pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
+ if (!vf_qm->io_base)
+ return -EIO;
+ }
vf_qm->fun_type = QM_HW_VF;
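+ /* The VF inherits the QM hardware version from the PF */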
+ vf_qm->ver = pf_qm->ver;
vf_qm->pdev = vf_dev;
mutex_init(&vf_qm->mailbox_lock);
return !IS_ERR(pf_qm) ? pf_qm : NULL;
}
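+/*
+ * Size of the BAR2 space that is safe to expose to the Guest for the
+ * current migration control mode; used by the region info, the
+ * read/write access check and the mmap paths below.
+ */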
+static size_t hisi_acc_get_resource_len(struct vfio_pci_core_device *vdev,
+ unsigned int index)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev =
+ hisi_acc_drvdata(vdev->pdev);
+
+ /*
+ * On older hardware (HW_ACC_MIG_VF_CTRL mode), the ACC VF device
+ * BAR2 region encompasses both the functional register space and
+ * the migration control register space.
+ * Only the functional region should be reported to the Guest.
+ */
+ if (hisi_acc_vdev->drv_mode == HW_ACC_MIG_VF_CTRL)
+ return (pci_resource_len(vdev->pdev, index) >> 1);
+ /*
+ * On newer hardware, the migration control registers have been
+ * moved to the PF device's BAR2 region, so the VF device's BAR2
+ * region is entirely functional register space.
+ */
+ return pci_resource_len(vdev->pdev, index);
+}
+
static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev,
size_t count, loff_t *ppos,
size_t *new_count)
if (index == VFIO_PCI_BAR2_REGION_INDEX) {
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
- resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;
+ resource_size_t end = hisi_acc_get_resource_len(vdev, index);
/* Check if access is for migration control region */
if (pos >= end)
return -EINVAL;
index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
if (index == VFIO_PCI_BAR2_REGION_INDEX) {
u64 req_len, pgoff, req_start;
- resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;
+ resource_size_t end = hisi_acc_get_resource_len(vdev, index);
req_len = vma->vm_end - vma->vm_start;
pgoff = vma->vm_pgoff &
((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
struct vfio_pci_core_device *vdev =
container_of(core_vdev, struct vfio_pci_core_device, vdev);
- struct pci_dev *pdev = vdev->pdev;
struct vfio_region_info info;
unsigned long minsz;
if (info.index == VFIO_PCI_BAR2_REGION_INDEX) {
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- /*
- * ACC VF dev BAR2 region consists of both functional
- * register space and migration control register space.
- * Report only the functional region to Guest.
- */
- info.size = pci_resource_len(pdev, info.index) / 2;
+ info.size = hisi_acc_get_resource_len(vdev, info.index);
info.flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE |
hisi_acc_vf_disable_fds(hisi_acc_vdev);
mutex_lock(&hisi_acc_vdev->open_mutex);
hisi_acc_vdev->dev_opened = false;
- iounmap(vf_qm->io_base);
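+ /*
+ * Only unmap what this driver mapped: in HW_ACC_MIG_PF_CTRL mode
+ * io_base points into the PF's BAR2 mapping, which this driver
+ * did not create.
+ */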
+ if (hisi_acc_vdev->drv_mode == HW_ACC_MIG_VF_CTRL)
+ iounmap(vf_qm->io_base);
mutex_unlock(&hisi_acc_vdev->open_mutex);
vfio_pci_core_close_device(core_vdev);
}