u32 gce_num;
};
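+/*
+ * GCE address registers are 32 bits wide. On platforms whose DMA
+ * addresses are wider, the address is stored right-shifted by
+ * pdata->shift; e.g. with shift = 3, DMA addr 0x1_2345_6780 is held
+ * as 0x2468_ACF0. The low shift bits are assumed to be zero, since
+ * command buffers are at least CMDQ_INST_SIZE aligned.
+ */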
+static inline u32 cmdq_convert_gce_addr(dma_addr_t addr, const struct gce_plat *pdata)
+{
+ /* Convert a DMA addr (PA or IOVA) to a GCE-readable addr */
+ return addr >> pdata->shift;
+}
+
+static inline dma_addr_t cmdq_revert_gce_addr(u32 addr, const struct gce_plat *pdata)
+{
+ /* Convert a GCE-readable addr back to a DMA addr (PA or IOVA) */
+ return (dma_addr_t)addr << pdata->shift;
+}
+
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
struct cmdq_task *prev_task = list_last_entry(
&thread->task_busy_list, typeof(*task), list_entry);
u64 *prev_task_base = prev_task->pkt->va_base;
+ u32 gce_addr = cmdq_convert_gce_addr(task->pa_base, task->cmdq->pdata);
/* let previous task jump to this task */
dma_sync_single_for_cpu(dev, prev_task->pa_base,
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
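+ /*
+ * A CMDQ instruction is 64 bits wide: the opcode (here
+ * CMDQ_JUMP_BY_PA) sits in the upper 32 bits and the operand,
+ * the GCE-readable jump target, in the lower 32 bits.
+ */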
- prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
- (u64)CMDQ_JUMP_BY_PA << 32 |
- (task->pa_base >> task->cmdq->pdata->shift);
+ prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] = (u64)CMDQ_JUMP_BY_PA << 32 | gce_addr;
dma_sync_single_for_device(dev, prev_task->pa_base,
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
struct cmdq_thread *thread)
{
struct cmdq_task *task, *tmp, *curr_task = NULL;
- u32 curr_pa, irq_flag, task_end_pa;
+ u32 irq_flag, gce_addr;
+ dma_addr_t curr_pa, task_end_pa;
bool err;
irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
else
return;
- curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift;
+ gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR);
+ curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
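+ /*
+ * CMDQ_THR_CURR_ADDR holds the shifted, GCE-readable address;
+ * convert it back to a dma_addr_t before comparing it against the
+ * task boundaries below.
+ */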
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
list_entry) {
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
struct cmdq_task *task;
- unsigned long curr_pa, end_pa;
+ u32 gce_addr;
+ dma_addr_t curr_pa, end_pa;
/* Client should not flush new tasks if suspended. */
WARN_ON(cmdq->suspended);
*/
WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
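+ /*
+ * Both thread address registers take GCE-readable (shifted)
+ * addresses: execution starts at CMDQ_THR_CURR_ADDR and the thread
+ * idles once it reaches CMDQ_THR_END_ADDR.
+ */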
- writel(task->pa_base >> cmdq->pdata->shift,
- thread->base + CMDQ_THR_CURR_ADDR);
- writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
- thread->base + CMDQ_THR_END_ADDR);
+ gce_addr = cmdq_convert_gce_addr(task->pa_base, cmdq->pdata);
+ writel(gce_addr, thread->base + CMDQ_THR_CURR_ADDR);
+ gce_addr = cmdq_convert_gce_addr(task->pa_base + pkt->cmd_buf_size, cmdq->pdata);
+ writel(gce_addr, thread->base + CMDQ_THR_END_ADDR);
writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
} else {
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
- curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
- cmdq->pdata->shift;
- end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
- cmdq->pdata->shift;
+ gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR);
+ curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
+ gce_addr = readl(thread->base + CMDQ_THR_END_ADDR);
+ end_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
/* check boundary */
if (curr_pa == end_pa - CMDQ_INST_SIZE ||
curr_pa == end_pa) {
if (err)
return err;
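+ /*
+ * The GCE forms bus addresses by shifting the stored 32-bit value
+ * left by pdata->shift, so it can address (32 + shift) bits; bound
+ * coherent allocations to that range.
+ */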
+ err = dma_set_coherent_mask(dev,
+ DMA_BIT_MASK(sizeof(u32) * BITS_PER_BYTE + cmdq->pdata->shift));
+ if (err)
+ return err;
+
cmdq->mbox.dev = dev;
cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
sizeof(*cmdq->mbox.chans), GFP_KERNEL);