#include <linux/pci-epf.h>
#include <linux/msi.h>
#include <linux/bitfield.h>
+#include <linux/sizes.h>
#include "dw-edma-core.h"
-#define DW_PCIE_VSEC_DMA_ID 0x6
-#define DW_PCIE_VSEC_DMA_BAR GENMASK(10, 8)
-#define DW_PCIE_VSEC_DMA_MAP GENMASK(2, 0)
-#define DW_PCIE_VSEC_DMA_WR_CH GENMASK(9, 0)
-#define DW_PCIE_VSEC_DMA_RD_CH GENMASK(25, 16)
+/* Synopsys specific defines */
+#define DW_PCIE_SYNOPSYS_VSEC_DMA_ID 0x6
+#define DW_PCIE_SYNOPSYS_VSEC_DMA_BAR GENMASK(10, 8)
+#define DW_PCIE_SYNOPSYS_VSEC_DMA_MAP GENMASK(2, 0)
+#define DW_PCIE_SYNOPSYS_VSEC_DMA_WR_CH GENMASK(9, 0)
+#define DW_PCIE_SYNOPSYS_VSEC_DMA_RD_CH GENMASK(25, 16)
+
+/* AMD MDB (Xilinx) specific defines */
+#define PCI_DEVICE_ID_XILINX_B054 0xb054
+
+#define DW_PCIE_XILINX_MDB_VSEC_DMA_ID 0x6
+#define DW_PCIE_XILINX_MDB_VSEC_ID 0x20
+#define DW_PCIE_XILINX_MDB_VSEC_DMA_BAR GENMASK(10, 8)
+#define DW_PCIE_XILINX_MDB_VSEC_DMA_MAP GENMASK(2, 0)
+#define DW_PCIE_XILINX_MDB_VSEC_DMA_WR_CH GENMASK(9, 0)
+#define DW_PCIE_XILINX_MDB_VSEC_DMA_RD_CH GENMASK(25, 16)
+
+#define DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_HIGH 0xc
+#define DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_LOW 0x8
+#define DW_PCIE_XILINX_MDB_INVALID_ADDR (~0ULL)
+
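+/*
+ * Per-channel spacing and size of the linked-list (LL) and data (DT)
+ * regions that dw_edma_set_chan_region_offset() lays out in the BAR.
+ */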
+#define DW_PCIE_XILINX_MDB_LL_OFF_GAP 0x200000
+#define DW_PCIE_XILINX_MDB_LL_SIZE 0x800
+#define DW_PCIE_XILINX_MDB_DT_OFF_GAP 0x100000
+#define DW_PCIE_XILINX_MDB_DT_SIZE 0x800
#define DW_BLOCK(a, b, c) \
{ \
u8 irqs;
u16 wr_ch_cnt;
u16 rd_ch_cnt;
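+ /* Device-side physical offset of the LL/data memory, if discovered */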
+ u64 devmem_phys_off;
};
static const struct dw_edma_pcie_data snps_edda_data = {
.rd_ch_cnt = 2,
};
+
+static const struct dw_edma_pcie_data xilinx_mdb_data = {
+ /* MDB registers location */
+ .rg.bar = BAR_0,
+ .rg.off = SZ_4K, /* 4 Kbytes */
+ .rg.sz = SZ_8K, /* 8 Kbytes */
+
+ /* Other */
+ .mf = EDMA_MF_HDMA_NATIVE,
+ .irqs = 1,
+ .wr_ch_cnt = 8,
+ .rd_ch_cnt = 8,
+};
+
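+/*
+ * Lay out the per-channel LL (linked list) and data block regions in
+ * the given BAR: write LL regions first, then read LL regions, then
+ * write and read data regions, with each channel's region spaced by
+ * the given gap starting at start_off.
+ *
+ * For the default xilinx_mdb_data counts (8 write + 8 read channels)
+ * and the gaps defined above, the write LL regions start at offset
+ * 0x0, the read LL regions at 0x1000000, the write data regions at
+ * 0x2000000 and the read data regions at 0x2800000.
+ */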
+static void dw_edma_set_chan_region_offset(struct dw_edma_pcie_data *pdata,
+ enum pci_barno bar, off_t start_off,
+ off_t ll_off_gap, size_t ll_size,
+ off_t dt_off_gap, size_t dt_size)
+{
+ u16 wr_ch = pdata->wr_ch_cnt;
+ u16 rd_ch = pdata->rd_ch_cnt;
+ off_t off;
+ u16 i;
+
+ off = start_off;
+
+ /* Write channel LL region */
+ for (i = 0; i < wr_ch; i++) {
+ pdata->ll_wr[i].bar = bar;
+ pdata->ll_wr[i].off = off;
+ pdata->ll_wr[i].sz = ll_size;
+ off += ll_off_gap;
+ }
+
+ /* Read channel LL region */
+ for (i = 0; i < rd_ch; i++) {
+ pdata->ll_rd[i].bar = bar;
+ pdata->ll_rd[i].off = off;
+ pdata->ll_rd[i].sz = ll_size;
+ off += ll_off_gap;
+ }
+
+ /* Write channel data region */
+ for (i = 0; i < wr_ch; i++) {
+ pdata->dt_wr[i].bar = bar;
+ pdata->dt_wr[i].off = off;
+ pdata->dt_wr[i].sz = dt_size;
+ off += dt_off_gap;
+ }
+
+ /* Read channel data region */
+ for (i = 0; i < rd_ch; i++) {
+ pdata->dt_rd[i].bar = bar;
+ pdata->dt_rd[i].off = off;
+ pdata->dt_rd[i].sz = dt_size;
+ off += dt_off_gap;
+ }
+}
+
static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
{
return pci_irq_vector(to_pci_dev(dev), nr);
.pci_address = dw_edma_pcie_address,
};
-static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
- struct dw_edma_pcie_data *pdata)
+static void dw_edma_pcie_get_synopsys_dma_data(struct pci_dev *pdev,
+ struct dw_edma_pcie_data *pdata)
{
u32 val, map;
u16 vsec;
u64 off;
vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_SYNOPSYS,
- DW_PCIE_VSEC_DMA_ID);
+ DW_PCIE_SYNOPSYS_VSEC_DMA_ID);
if (!vsec)
return;

pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
if (PCI_VNDR_HEADER_REV(val) != 0x00 ||
PCI_VNDR_HEADER_LEN(val) != 0x18)
return;
- pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability DMA\n");
+ pci_dbg(pdev, "Detected Synopsys PCIe Vendor-Specific Extended Capability DMA\n");
pci_read_config_dword(pdev, vsec + 0x8, &val);
- map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val);
+ map = FIELD_GET(DW_PCIE_SYNOPSYS_VSEC_DMA_MAP, val);
if (map != EDMA_MF_EDMA_LEGACY &&
map != EDMA_MF_EDMA_UNROLL &&
map != EDMA_MF_HDMA_COMPAT &&
map != EDMA_MF_HDMA_NATIVE)
return;
pdata->mf = map;
- pdata->rg.bar = FIELD_GET(DW_PCIE_VSEC_DMA_BAR, val);
+ pdata->rg.bar = FIELD_GET(DW_PCIE_SYNOPSYS_VSEC_DMA_BAR, val);
pci_read_config_dword(pdev, vsec + 0xc, &val);
pdata->wr_ch_cnt = min_t(u16, pdata->wr_ch_cnt,
- FIELD_GET(DW_PCIE_VSEC_DMA_WR_CH, val));
+ FIELD_GET(DW_PCIE_SYNOPSYS_VSEC_DMA_WR_CH, val));
pdata->rd_ch_cnt = min_t(u16, pdata->rd_ch_cnt,
- FIELD_GET(DW_PCIE_VSEC_DMA_RD_CH, val));
+ FIELD_GET(DW_PCIE_SYNOPSYS_VSEC_DMA_RD_CH, val));
pci_read_config_dword(pdev, vsec + 0x14, &val);
off = val;
pci_read_config_dword(pdev, vsec + 0x10, &val);
off <<= 32;
off |= val;
pdata->rg.off = off;
}
+
+static void dw_edma_pcie_get_xilinx_dma_data(struct pci_dev *pdev,
+ struct dw_edma_pcie_data *pdata)
+{
+ u32 val, map;
+ u16 vsec;
+ u64 off;
+
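+ /*
+ * Default to the invalid-address sentinel so the probe path can
+ * detect that no device-side memory offset was discovered.
+ */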
+ pdata->devmem_phys_off = DW_PCIE_XILINX_MDB_INVALID_ADDR;
+
+ vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_XILINX,
+ DW_PCIE_XILINX_MDB_VSEC_DMA_ID);
+ if (!vsec)
+ return;
+
+ pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
+ if (PCI_VNDR_HEADER_REV(val) != 0x00 ||
+ PCI_VNDR_HEADER_LEN(val) != 0x18)
+ return;
+
+ pci_dbg(pdev, "Detected Xilinx PCIe Vendor-Specific Extended Capability DMA\n");
+ pci_read_config_dword(pdev, vsec + 0x8, &val);
+ map = FIELD_GET(DW_PCIE_XILINX_MDB_VSEC_DMA_MAP, val);
+ if (map != EDMA_MF_HDMA_NATIVE)
+ return;
+
+ pdata->mf = map;
+ pdata->rg.bar = FIELD_GET(DW_PCIE_XILINX_MDB_VSEC_DMA_BAR, val);
+
+ pci_read_config_dword(pdev, vsec + 0xc, &val);
+ pdata->wr_ch_cnt = min_t(u16, pdata->wr_ch_cnt,
+ FIELD_GET(DW_PCIE_XILINX_MDB_VSEC_DMA_WR_CH, val));
+ pdata->rd_ch_cnt = min_t(u16, pdata->rd_ch_cnt,
+ FIELD_GET(DW_PCIE_XILINX_MDB_VSEC_DMA_RD_CH, val));
+
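+ /*
+ * The register-space offset is a 64-bit value split across two
+ * dwords: the high half at vsec + 0x14, the low half at vsec + 0x10.
+ */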
+ pci_read_config_dword(pdev, vsec + 0x14, &val);
+ off = val;
+ pci_read_config_dword(pdev, vsec + 0x10, &val);
+ off <<= 32;
+ off |= val;
+ pdata->rg.off = off;
+
+ vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_XILINX,
+ DW_PCIE_XILINX_MDB_VSEC_ID);
+ if (!vsec)
+ return;
+
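+ /*
+ * Read the 64-bit physical offset of the device memory that backs
+ * the LL and data blocks: high dword first, then the low dword.
+ */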
+ pci_read_config_dword(pdev,
+ vsec + DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_HIGH,
+ &val);
+ off = val;
+ pci_read_config_dword(pdev,
+ vsec + DW_PCIE_XILINX_MDB_DEVMEM_OFF_REG_LOW,
+ &val);
+ off <<= 32;
+ off |= val;
+ pdata->devmem_phys_off = off;
+}
+
static int dw_edma_pcie_probe(struct pci_dev *pdev,
const struct pci_device_id *pid)
{
* Tries to find a PCIe Vendor-Specific Extended Capability for the
* DMA; if one exists, reconfigures it.
*/
- dw_edma_pcie_get_vsec_dma_data(pdev, vsec_data);
+ dw_edma_pcie_get_synopsys_dma_data(pdev, vsec_data);
+
+ if (pdev->vendor == PCI_VENDOR_ID_XILINX) {
+ dw_edma_pcie_get_xilinx_dma_data(pdev, vsec_data);
+
+ /*
+ * No valid address was found for the LL memory space on the
+ * device side, so the controller cannot be used.
+ */
+ if (vsec_data->devmem_phys_off == DW_PCIE_XILINX_MDB_INVALID_ADDR)
+ return -ENOMEM;
+
+ /*
+ * Lay out the per-channel LL and data block regions in BAR_2,
+ * using the channel counts as possibly clamped by the VSEC
+ * capability above.
+ */
+ dw_edma_set_chan_region_offset(vsec_data, BAR_2, 0,
+ DW_PCIE_XILINX_MDB_LL_OFF_GAP,
+ DW_PCIE_XILINX_MDB_LL_SIZE,
+ DW_PCIE_XILINX_MDB_DT_OFF_GAP,
+ DW_PCIE_XILINX_MDB_DT_SIZE);
+ }
/* Mapping PCI BAR regions */
mask = BIT(vsec_data->rg.bar);
static const struct pci_device_id dw_edma_pcie_id_table[] = {
{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) },
+ { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_XILINX_B054),
+ (kernel_ulong_t)&xilinx_mdb_data },
{ }
};
MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table);