destroy_workqueue(events->wq);
mutex_lock(&xdna->dev_lock);
- dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf,
- events->addr, DMA_FROM_DEVICE);
+ aie2_free_msg_buffer(ndev, events->size, events->buf, events->addr);
kfree(events);
}
if (!events)
return -ENOMEM;
- events->buf = dma_alloc_noncoherent(xdna->ddev.dev, total_size, &events->addr,
- DMA_FROM_DEVICE, GFP_KERNEL);
+ events->buf = aie2_alloc_msg_buffer(ndev, &total_size, &events->addr);
+
if (!events->buf) {
ret = -ENOMEM;
goto free_events;
free_wq:
destroy_workqueue(events->wq);
free_buf:
- dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf,
- events->addr, DMA_FROM_DEVICE);
+ aie2_free_msg_buffer(ndev, events->size, events->buf, events->addr);
free_events:
kfree(events);
return ret;
}
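+/*
+ * Allocate a noncoherent DMA buffer for exchanging messages with the
+ * firmware. On success, *size is updated to the actual allocation size;
+ * callers must pass that updated size to aie2_free_msg_buffer().
+ */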
+void *aie2_alloc_msg_buffer(struct amdxdna_dev_hdl *ndev, u32 *size,
+ dma_addr_t *dma_addr)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int order;
+
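+ /* Enforce a minimum of SZ_8K, then round up to a power-of-two number of pages */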
+ *size = max(*size, SZ_8K);
+ order = get_order(*size);
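+ /* The page allocator cannot provide contiguous memory above MAX_PAGE_ORDER */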
+ if (order > MAX_PAGE_ORDER)
+ return NULL;
+ *size = PAGE_SIZE << order;
+
+ return dma_alloc_noncoherent(xdna->ddev.dev, *size, dma_addr,
+ DMA_FROM_DEVICE, GFP_KERNEL);
+}
+
int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev)
{
DECLARE_AIE2_MSG(suspend, MSG_OP_SUSPEND);
{
DECLARE_AIE2_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS);
struct amdxdna_dev *xdna = ndev->xdna;
+ u32 buf_sz = size, aie_bitmap = 0;
struct amdxdna_client *client;
dma_addr_t dma_addr;
- u32 aie_bitmap = 0;
u8 *buff_addr;
int ret;
- buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
- DMA_FROM_DEVICE, GFP_KERNEL);
+ buff_addr = aie2_alloc_msg_buffer(ndev, &buf_sz, &dma_addr);
if (!buff_addr)
return -ENOMEM;
*cols_filled = 0;
req.dump_buff_addr = dma_addr;
- req.dump_buff_size = size;
+ req.dump_buff_size = buf_sz;
req.num_cols = hweight32(aie_bitmap);
req.aie_bitmap = aie_bitmap;
*cols_filled = aie_bitmap;
fail:
- dma_free_noncoherent(xdna->ddev.dev, size, buff_addr, dma_addr, DMA_FROM_DEVICE);
+ aie2_free_msg_buffer(ndev, buf_sz, buff_addr, dma_addr);
return ret;
}
DECLARE_AIE2_MSG(get_telemetry, MSG_OP_GET_TELEMETRY);
struct amdxdna_dev *xdna = ndev->xdna;
dma_addr_t dma_addr;
+ u32 buf_sz = size;
u8 *addr;
int ret;
if (header->type >= MAX_TELEMETRY_TYPE)
return -EINVAL;
- addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
- DMA_FROM_DEVICE, GFP_KERNEL);
+ addr = aie2_alloc_msg_buffer(ndev, &buf_sz, &dma_addr);
if (!addr)
return -ENOMEM;
req.buf_addr = dma_addr;
- req.buf_size = size;
+ req.buf_size = buf_sz;
req.type = header->type;
- drm_clflush_virt_range(addr, size); /* device can access */
+ drm_clflush_virt_range(addr, buf_sz); /* device can access */
header->minor = resp.minor;
free_buf:
- dma_free_noncoherent(xdna->ddev.dev, size, addr, dma_addr, DMA_FROM_DEVICE);
+ aie2_free_msg_buffer(ndev, buf_sz, addr, dma_addr);
return ret;
}
int (*notify_cb)(void *, void __iomem *, size_t));
int aie2_config_debug_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
int (*notify_cb)(void *, void __iomem *, size_t));
+void *aie2_alloc_msg_buffer(struct amdxdna_dev_hdl *ndev, u32 *size,
+ dma_addr_t *dma_addr);
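+/*
+ * Free a buffer obtained from aie2_alloc_msg_buffer(). The size argument
+ * must be the actual size the allocator wrote back through its size
+ * parameter.
+ */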
+#define aie2_free_msg_buffer(ndev, size, buff_addr, dma_addr) \
+ dma_free_noncoherent((ndev)->xdna->ddev.dev, (size), (buff_addr), \
+ (dma_addr), DMA_FROM_DEVICE)
/* aie2_hwctx.c */
int aie2_hwctx_init(struct amdxdna_hwctx *hwctx);