--- a/drivers/accel/ivpu/ivpu_bo.c
+++ b/drivers/accel/ivpu/ivpu_bo.c
if (!bo->mmu_mapped) {
drm_WARN_ON(&vdev->drm, !bo->ctx);
- ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
- ivpu_bo_is_snooped(bo), ivpu_bo_is_read_only(bo));
+ ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt, ivpu_bo_size(bo),
+ ivpu_bo_is_snooped(bo), ivpu_bo_is_read_only(bo));
if (ret) {
ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
}
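
For context: ivpu_bo_size() supplies the size of the GEM object backing the BO, so the mapper below can validate the scatter-gather table against it instead of trusting the table's lengths blindly. A sketch of the helper as defined in ivpu_bo.h (reproduced from memory; verify against your tree):

    static inline size_t ivpu_bo_size(struct ivpu_bo *bo)
    {
        /* ivpu_bo wraps drm_gem_shmem_object, which wraps drm_gem_object */
        return bo->base.base.size;
    }
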
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
int
-ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
- u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only)
+ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
+ struct sg_table *sgt, size_t bo_size, bool llc_coherent, bool read_only)
{
size_t start_vpu_addr = vpu_addr;
struct scatterlist *sg;
+ size_t sgt_size = 0;
int ret;
u64 prot;
u64 i;

/* ... parameter validation, prot setup and mutex_lock(&ctx->lock) elided ... */

for_each_sgtable_dma_sg(sgt, sg, i) {
dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
size_t size = sg_dma_len(sg) + sg->offset;

ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
ctx->id, dma_addr, vpu_addr, size);
+ if (sgt_size + size > bo_size) {
+ ivpu_err(vdev, "Scatter-gather table size exceeds buffer object size\n");
+ ret = -EINVAL;
+ goto err_unmap_pages;
+ }
+
ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
if (ret) {
ivpu_err(vdev, "Failed to map context pages\n");
goto err_unmap_pages;
}
vpu_addr += size;
+ sgt_size += size;
}
+
+ if (sgt_size < bo_size) {
+ ivpu_err(vdev, "Scatter-gather table size too small to cover buffer object size\n");
+ ret = -EINVAL;
+ goto err_unmap_pages;
}
if (!ctx->is_cd_valid) {
/* ... context-descriptor setup elided ... */
}

mutex_unlock(&ctx->lock);
return 0;

err_unmap_pages:
- ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, vpu_addr - start_vpu_addr);
+ ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, sgt_size);
mutex_unlock(&ctx->lock);
return ret;
}
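
Taken together, the two new checks enforce a single invariant: the DMA lengths in the SG table must cover the buffer object exactly, neither overshooting bo_size mid-walk nor falling short at the end. A self-contained sketch of that invariant (struct seg and sgt_covers_bo are invented for illustration, not driver code):

    #include <stdbool.h>
    #include <stddef.h>

    struct seg { size_t len; };  /* stand-in for one SG-table entry */

    /* Mirrors the checks above: reject as soon as the running total would
     * exceed bo_size, and reject at the end if it falls short of bo_size. */
    static bool sgt_covers_bo(const struct seg *segs, size_t nsegs, size_t bo_size)
    {
        size_t mapped = 0;
        size_t i;

        for (i = 0; i < nsegs; i++) {
            if (mapped + segs[i].len > bo_size)
                return false;  /* table larger than the BO */
            mapped += segs[i].len;
        }
        return mapped == bo_size;  /* false when the table is too small */
    }

Note also that the error path now unmaps sgt_size bytes rather than vpu_addr - start_vpu_addr. In the code shown the two are equal after every successful iteration, but sgt_size states "bytes actually mapped" directly and avoids the mixed u64/size_t subtraction.
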
--- a/drivers/accel/ivpu/ivpu_mmu_context.h
+++ b/drivers/accel/ivpu/ivpu_mmu_context.h
int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
u64 size, struct drm_mm_node *node);
void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node);
-int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
- u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only);
+int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
+ struct sg_table *sgt, size_t bo_size, bool llc_coherent, bool read_only);
void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, struct sg_table *sgt);
int ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,