struct dma_slave_config *config)
{
struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+ bool cfg_non_ll;
+ int non_ll = 0;
+
+ chan->non_ll = false;
+ if (chan->dw->chip->mf == EDMA_MF_HDMA_NATIVE) {
+ if (config->peripheral_config &&
+ config->peripheral_size != sizeof(int)) {
+ dev_err(dchan->device->dev,
+ "config param peripheral size mismatch\n");
+ return -EINVAL;
+ }
+
+	/*
+	 * When no valid LLP base address is available, the default DMA
+	 * ops fall back to the non-LL mode.
+	 *
+	 * Even when LL mode is available, a client may still opt into
+	 * the non-LL mode by passing the peripheral_config param.
+	 */
+ cfg_non_ll = chan->dw->chip->cfg_non_ll;
+ if (config->peripheral_config) {
+ non_ll = *(int *)config->peripheral_config;
+
+ if (cfg_non_ll && !non_ll) {
+ dev_err(dchan->device->dev, "invalid configuration\n");
+ return -EINVAL;
+ }
+ }
+
+ if (cfg_non_ll || non_ll)
+ chan->non_ll = true;
+ } else if (config->peripheral_config) {
+ dev_err(dchan->device->dev,
+ "peripheral config param applicable only for HDMA\n");
+ return -EINVAL;
+ }
memcpy(&chan->config, config, sizeof(*config));
chan->configured = true;
struct dw_edma_desc *desc;
u64 src_addr, dst_addr;
size_t fsz = 0;
+ u32 bursts_max;
u32 cnt = 0;
int i;
return NULL;
}
+	/*
+	 * In non-LL mode only a single burst can be handled per chunk,
+	 * unlike LL mode where multiple bursts can be configured in a
+	 * single chunk.
+	 */
+ bursts_max = chan->non_ll ? 1 : chan->ll_max;
+
desc = dw_edma_alloc_desc(chan);
if (unlikely(!desc))
goto err_alloc;
if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
break;
- if (chunk->bursts_alloc == chan->ll_max) {
+ if (chunk->bursts_alloc == bursts_max) {
chunk = dw_edma_alloc_chunk(desc);
if (unlikely(!chunk))
goto err_alloc;
u8 configured;
struct dma_slave_config config;
+ bool non_ll;
};
struct dw_edma_irq {
pdata->devmem_phys_off = off;
}
+/*
+ * Return the physical/bus address backing the channel memory regions.
+ * Xilinx MDB devices expose the region at a fixed device-memory offset
+ * rather than behind a BAR; everyone else uses the BAR bus address.
+ */
+static u64 dw_edma_get_phys_addr(struct pci_dev *pdev,
+				 struct dw_edma_pcie_data *pdata,
+				 enum pci_barno bar)
+{
+	return pdev->vendor == PCI_VENDOR_ID_XILINX ?
+	       pdata->devmem_phys_off : pci_bus_address(pdev, bar);
+}
+
static int dw_edma_pcie_probe(struct pci_dev *pdev,
const struct pci_device_id *pid)
{
struct dw_edma_chip *chip;
int err, nr_irqs;
int i, mask;
+ bool non_ll = false;
struct dw_edma_pcie_data *vsec_data __free(kfree) =
kmalloc_obj(*vsec_data);
/*
* There is no valid address found for the LL memory
- * space on the device side.
+ * space on the device side. In the absence of an LL base
+ * address, fall back to the non-LL (simple) mode supported
+ * by the HDMA IP.
*/
if (vsec_data->devmem_phys_off == DW_PCIE_XILINX_MDB_INVALID_ADDR)
- return -ENOMEM;
+ non_ll = true;
/*
* Configure the channel LL and data blocks if number of
* channels enabled in VSEC capability are more than the
* channels configured in xilinx_mdb_data.
*/
- dw_edma_set_chan_region_offset(vsec_data, BAR_2, 0,
- DW_PCIE_XILINX_MDB_LL_OFF_GAP,
- DW_PCIE_XILINX_MDB_LL_SIZE,
- DW_PCIE_XILINX_MDB_DT_OFF_GAP,
- DW_PCIE_XILINX_MDB_DT_SIZE);
+ if (!non_ll)
+ dw_edma_set_chan_region_offset(vsec_data, BAR_2, 0,
+ DW_PCIE_XILINX_MDB_LL_OFF_GAP,
+ DW_PCIE_XILINX_MDB_LL_SIZE,
+ DW_PCIE_XILINX_MDB_DT_OFF_GAP,
+ DW_PCIE_XILINX_MDB_DT_SIZE);
}
/* Mapping PCI BAR regions */
chip->mf = vsec_data->mf;
chip->nr_irqs = nr_irqs;
chip->ops = &dw_edma_pcie_plat_ops;
+ chip->cfg_non_ll = non_ll;
chip->ll_wr_cnt = vsec_data->wr_ch_cnt;
chip->ll_rd_cnt = vsec_data->rd_ch_cnt;
if (!chip->reg_base)
return -ENOMEM;
- for (i = 0; i < chip->ll_wr_cnt; i++) {
+ for (i = 0; i < chip->ll_wr_cnt && !non_ll; i++) {
struct dw_edma_region *ll_region = &chip->ll_region_wr[i];
struct dw_edma_region *dt_region = &chip->dt_region_wr[i];
struct dw_edma_block *ll_block = &vsec_data->ll_wr[i];
return -ENOMEM;
ll_region->vaddr.io += ll_block->off;
- ll_region->paddr = pci_bus_address(pdev, ll_block->bar);
+ ll_region->paddr = dw_edma_get_phys_addr(pdev, vsec_data,
+ ll_block->bar);
ll_region->paddr += ll_block->off;
ll_region->sz = ll_block->sz;
return -ENOMEM;
dt_region->vaddr.io += dt_block->off;
- dt_region->paddr = pci_bus_address(pdev, dt_block->bar);
+ dt_region->paddr = dw_edma_get_phys_addr(pdev, vsec_data,
+ dt_block->bar);
dt_region->paddr += dt_block->off;
dt_region->sz = dt_block->sz;
}
- for (i = 0; i < chip->ll_rd_cnt; i++) {
+ for (i = 0; i < chip->ll_rd_cnt && !non_ll; i++) {
struct dw_edma_region *ll_region = &chip->ll_region_rd[i];
struct dw_edma_region *dt_region = &chip->dt_region_rd[i];
struct dw_edma_block *ll_block = &vsec_data->ll_rd[i];
return -ENOMEM;
ll_region->vaddr.io += ll_block->off;
- ll_region->paddr = pci_bus_address(pdev, ll_block->bar);
+ ll_region->paddr = dw_edma_get_phys_addr(pdev, vsec_data,
+ ll_block->bar);
ll_region->paddr += ll_block->off;
ll_region->sz = ll_block->sz;
return -ENOMEM;
dt_region->vaddr.io += dt_block->off;
- dt_region->paddr = pci_bus_address(pdev, dt_block->bar);
+ dt_region->paddr = dw_edma_get_phys_addr(pdev, vsec_data,
+ dt_block->bar);
dt_region->paddr += dt_block->off;
dt_region->sz = dt_block->sz;
}
readl(chunk->ll_region.vaddr.io);
}
-static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+static void dw_hdma_v0_core_ll_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
struct dw_edma *dw = chan->dw;
SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
}
+/*
+ * Program a single transfer directly into the channel registers (simple,
+ * non-linked-list mode) and ring the doorbell. Only the first burst of
+ * the chunk is programmed; in non-LL mode chunks are limited to one
+ * burst per chunk.
+ */
+static void dw_hdma_v0_core_non_ll_start(struct dw_edma_chunk *chunk)
+{
+	struct dw_edma_chan *chan = chunk->chan;
+	struct dw_edma *dw = chan->dw;
+	struct dw_edma_burst *child;
+	u32 val;
+
+	/* Nothing to program if the chunk carries no burst. */
+	child = list_first_entry_or_null(&chunk->burst->list,
+					 struct dw_edma_burst, list);
+	if (!child)
+		return;
+
+	/* Enable the channel before programming the transfer. */
+	SET_CH_32(dw, chan->dir, chan->id, ch_en, HDMA_V0_CH_EN);
+
+	/* Source address */
+	SET_CH_32(dw, chan->dir, chan->id, sar.lsb,
+		  lower_32_bits(child->sar));
+	SET_CH_32(dw, chan->dir, chan->id, sar.msb,
+		  upper_32_bits(child->sar));
+
+	/* Destination address */
+	SET_CH_32(dw, chan->dir, chan->id, dar.lsb,
+		  lower_32_bits(child->dar));
+	SET_CH_32(dw, chan->dir, chan->id, dar.msb,
+		  upper_32_bits(child->dar));
+
+	/* Transfer size */
+	SET_CH_32(dw, chan->dir, chan->id, transfer_size, child->sz);
+
+	/* Interrupt setup: always take the local stop/abort interrupts */
+	val = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
+	      HDMA_V0_STOP_INT_MASK |
+	      HDMA_V0_ABORT_INT_MASK |
+	      HDMA_V0_LOCAL_STOP_INT_EN |
+	      HDMA_V0_LOCAL_ABORT_INT_EN;
+
+	/* Also enable remote interrupts when the chip is not locally driven */
+	if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL)) {
+		val |= HDMA_V0_REMOTE_STOP_INT_EN |
+		       HDMA_V0_REMOTE_ABORT_INT_EN;
+	}
+
+	SET_CH_32(dw, chan->dir, chan->id, int_setup, val);
+
+	/* Channel control setup: clear LINKLIST_EN to select simple mode */
+	val = GET_CH_32(dw, chan->dir, chan->id, control1);
+	val &= ~HDMA_V0_LINKLIST_EN;
+	SET_CH_32(dw, chan->dir, chan->id, control1, val);
+
+	/* Kick off the transfer */
+	SET_CH_32(dw, chan->dir, chan->id, doorbell,
+		  HDMA_V0_DOORBELL_START);
+}
+
+/* Dispatch to the simple-mode or linked-list channel start path. */
+static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+{
+	if (chunk->chan->non_ll)
+		dw_hdma_v0_core_non_ll_start(chunk);
+	else
+		dw_hdma_v0_core_ll_start(chunk, first);
+}
+
static void dw_hdma_v0_core_ch_config(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->dw;
#include <linux/dmaengine.h>
#define HDMA_V0_MAX_NR_CH 8
+#define HDMA_V0_CH_EN BIT(0)
#define HDMA_V0_LOCAL_ABORT_INT_EN BIT(6)
#define HDMA_V0_REMOTE_ABORT_INT_EN BIT(5)
#define HDMA_V0_LOCAL_STOP_INT_EN BIT(4)
enum dw_edma_map_format mf;
struct dw_edma *dw;
+ bool cfg_non_ll;
};
/* Export to the platform drivers */