return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
else if (pdev_is_gen3(ndev->ntb.pdev))
return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
- else if (pdev_is_gen4(ndev->ntb.pdev) || pdev_is_gen5(ndev->ntb.pdev))
+ else if (pdev_is_gen4(ndev->ntb.pdev) || pdev_is_gen5(ndev->ntb.pdev) ||
+ pdev_is_gen6(ndev->ntb.pdev))
return ndev_ntb4_debugfs_read(filp, ubuf, count, offp);
return -ENXIO;
rc = gen3_init_dev(ndev);
if (rc)
goto err_init_dev;
- } else if (pdev_is_gen4(pdev) || pdev_is_gen5(pdev)) {
+ } else if (pdev_is_gen4(pdev) || pdev_is_gen5(pdev) ||
+ pdev_is_gen6(pdev)) {
ndev->ntb.ops = &intel_ntb4_ops;
rc = intel_ntb_init_pci(ndev, pdev);
if (rc)
err_register:
ndev_deinit_debugfs(ndev);
if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) ||
- pdev_is_gen4(pdev) || pdev_is_gen5(pdev))
+ pdev_is_gen4(pdev) || pdev_is_gen5(pdev) ||
+ pdev_is_gen6(pdev))
xeon_deinit_dev(ndev);
err_init_dev:
intel_ntb_deinit_pci(ndev);
ntb_unregister_device(&ndev->ntb);
ndev_deinit_debugfs(ndev);
if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) ||
- pdev_is_gen4(pdev) || pdev_is_gen5(pdev))
+ pdev_is_gen4(pdev) || pdev_is_gen5(pdev) ||
+ pdev_is_gen6(pdev))
xeon_deinit_dev(ndev);
intel_ntb_deinit_pci(ndev);
kfree(ndev);
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_ICX)},
/* GEN5 PCIe */
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_GNR)},
+ /* GEN6 PCIe */
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_DMR)},
{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
.spad = GEN4_EM_SPAD_OFFSET,
};
+/*
+ * get_ppd0() - select the PPD0 register offset for this device generation.
+ *
+ * Gen4 and gen5 parts expose PPD0 at GEN4_PPD0_OFFSET; gen6 (DMR) moved
+ * it to GEN6_PPD0_OFFSET.
+ *
+ * NOTE(review): returns ULLONG_MAX for an unrecognized device, but the
+ * callers in gen4_link_enable() add the result to self_mmio without
+ * checking for the sentinel -- confirm every call path is already gated
+ * on pdev_is_gen4/5/6 before this helper can be reached.
+ */
+static u64 get_ppd0(struct pci_dev *pdev)
+{
+	if (pdev_is_gen4(pdev) || pdev_is_gen5(pdev))
+		return GEN4_PPD0_OFFSET;
+	else if (pdev_is_gen6(pdev))
+		return GEN6_PPD0_OFFSET;
+
+	return ULLONG_MAX;
+}
+
static int gen4_poll_link(struct intel_ntb_dev *ndev)
{
u16 reg_val;
int gen4_init_dev(struct intel_ntb_dev *ndev)
{
struct pci_dev *pdev = ndev->ntb.pdev;
- u32 ppd1/*, ppd0*/;
+ u32 ppd1;
u16 lnkctl;
int rc;
ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
if (pdev_is_ICX(pdev))
ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
- else if (pdev_is_SPR(pdev) || pdev_is_gen5(pdev))
+ else if (pdev_is_SPR(pdev) || pdev_is_gen5(pdev) || pdev_is_gen6(pdev))
ndev->ntb.topo = spr_ppd_topo(ndev, ppd1);
dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1,
ntb_topo_string(ndev->ntb.topo));
enum ntb_speed max_speed, enum ntb_width max_width)
{
struct intel_ntb_dev *ndev;
+ struct pci_dev *pdev;
u32 ntb_ctl, ppd0;
u16 lnkctl;
ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+ pdev = ntb->pdev;
dev_dbg(&ntb->pdev->dev,
"Enabling link with max_speed %d max_width %d\n",
iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
/* start link training in PPD0 */
- ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET);
+ ppd0 = ioread32(ndev->self_mmio + get_ppd0(pdev));
ppd0 |= GEN4_PPD_LINKTRN;
- iowrite32(ppd0, ndev->self_mmio + GEN4_PPD0_OFFSET);
+ iowrite32(ppd0, ndev->self_mmio + get_ppd0(pdev));
/* make sure link training has started */
- ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET);
+ ppd0 = ioread32(ndev->self_mmio + get_ppd0(pdev));
if (!(ppd0 & GEN4_PPD_LINKTRN)) {
dev_warn(&ntb->pdev->dev, "Link is not training\n");
return -ENXIO;
#define NTB_LTR_IDLE_LATSCALE 0x0800 /* 1us scale */
#define NTB_LTR_IDLE_REQMNT 0x8000 /* snoop req enable */
+#define GEN6_PPD0_OFFSET 0xf0d4
+
ssize_t ndev_ntb4_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp);
int gen4_init_dev(struct intel_ntb_dev *ndev);
#define PCI_DEVICE_ID_INTEL_NTB_B2B_SKX 0x201C
#define PCI_DEVICE_ID_INTEL_NTB_B2B_ICX 0x347e
#define PCI_DEVICE_ID_INTEL_NTB_B2B_GNR 0x0db4
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_DMR 0x7868
/* Ntb control and link status */
#define NTB_CTL_CFG_LOCK BIT(0)
return pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_GNR;
}
+/* True iff @pdev is a gen6 (DMR) NTB endpoint, by PCI device ID. */
+static inline int pdev_is_gen6(struct pci_dev *pdev)
+{
+	return pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_DMR;
+}
+
#endif