// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_prime.h>

#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"
/* Forward declaration: implementation lives below, after the funcs table. */
static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
				   struct vm_area_struct *vma);
19 static const struct vm_operations_struct vm_ops
= {
20 .open
= drm_gem_vm_open
,
21 .close
= drm_gem_vm_close
,
24 static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs
= {
25 .free
= mtk_drm_gem_free_object
,
26 .get_sg_table
= mtk_gem_prime_get_sg_table
,
27 .vmap
= mtk_drm_gem_prime_vmap
,
28 .vunmap
= mtk_drm_gem_prime_vunmap
,
29 .mmap
= mtk_drm_gem_object_mmap
,
33 static struct mtk_drm_gem_obj
*mtk_drm_gem_init(struct drm_device
*dev
,
36 struct mtk_drm_gem_obj
*mtk_gem_obj
;
39 size
= round_up(size
, PAGE_SIZE
);
41 mtk_gem_obj
= kzalloc(sizeof(*mtk_gem_obj
), GFP_KERNEL
);
43 return ERR_PTR(-ENOMEM
);
45 mtk_gem_obj
->base
.funcs
= &mtk_drm_gem_object_funcs
;
47 ret
= drm_gem_object_init(dev
, &mtk_gem_obj
->base
, size
);
49 DRM_ERROR("failed to initialize gem object\n");
57 struct mtk_drm_gem_obj
*mtk_drm_gem_create(struct drm_device
*dev
,
58 size_t size
, bool alloc_kmap
)
60 struct mtk_drm_private
*priv
= dev
->dev_private
;
61 struct mtk_drm_gem_obj
*mtk_gem
;
62 struct drm_gem_object
*obj
;
65 mtk_gem
= mtk_drm_gem_init(dev
, size
);
67 return ERR_CAST(mtk_gem
);
71 mtk_gem
->dma_attrs
= DMA_ATTR_WRITE_COMBINE
;
74 mtk_gem
->dma_attrs
|= DMA_ATTR_NO_KERNEL_MAPPING
;
76 mtk_gem
->cookie
= dma_alloc_attrs(priv
->dma_dev
, obj
->size
,
77 &mtk_gem
->dma_addr
, GFP_KERNEL
,
79 if (!mtk_gem
->cookie
) {
80 DRM_ERROR("failed to allocate %zx byte dma buffer", obj
->size
);
86 mtk_gem
->kvaddr
= mtk_gem
->cookie
;
88 DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n",
89 mtk_gem
->cookie
, &mtk_gem
->dma_addr
,
95 drm_gem_object_release(obj
);
100 void mtk_drm_gem_free_object(struct drm_gem_object
*obj
)
102 struct mtk_drm_gem_obj
*mtk_gem
= to_mtk_gem_obj(obj
);
103 struct mtk_drm_private
*priv
= obj
->dev
->dev_private
;
106 drm_prime_gem_destroy(obj
, mtk_gem
->sg
);
108 dma_free_attrs(priv
->dma_dev
, obj
->size
, mtk_gem
->cookie
,
109 mtk_gem
->dma_addr
, mtk_gem
->dma_attrs
);
111 /* release file pointer to gem object. */
112 drm_gem_object_release(obj
);
117 int mtk_drm_gem_dumb_create(struct drm_file
*file_priv
, struct drm_device
*dev
,
118 struct drm_mode_create_dumb
*args
)
120 struct mtk_drm_gem_obj
*mtk_gem
;
123 args
->pitch
= DIV_ROUND_UP(args
->width
* args
->bpp
, 8);
124 args
->size
= args
->pitch
* args
->height
;
126 mtk_gem
= mtk_drm_gem_create(dev
, args
->size
, false);
128 return PTR_ERR(mtk_gem
);
131 * allocate a id of idr table where the obj is registered
132 * and handle has the id what user can see.
134 ret
= drm_gem_handle_create(file_priv
, &mtk_gem
->base
, &args
->handle
);
136 goto err_handle_create
;
138 /* drop reference from allocate - handle holds it now. */
139 drm_gem_object_put(&mtk_gem
->base
);
144 mtk_drm_gem_free_object(&mtk_gem
->base
);
148 static int mtk_drm_gem_object_mmap(struct drm_gem_object
*obj
,
149 struct vm_area_struct
*vma
)
153 struct mtk_drm_gem_obj
*mtk_gem
= to_mtk_gem_obj(obj
);
154 struct mtk_drm_private
*priv
= obj
->dev
->dev_private
;
157 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
158 * whole buffer from the start.
163 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
164 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
166 vm_flags_set(vma
, VM_IO
| VM_DONTEXPAND
| VM_DONTDUMP
);
167 vma
->vm_page_prot
= pgprot_writecombine(vm_get_page_prot(vma
->vm_flags
));
168 vma
->vm_page_prot
= pgprot_decrypted(vma
->vm_page_prot
);
170 ret
= dma_mmap_attrs(priv
->dma_dev
, vma
, mtk_gem
->cookie
,
171 mtk_gem
->dma_addr
, obj
->size
, mtk_gem
->dma_attrs
);
177 * Allocate a sg_table for this GEM object.
178 * Note: Both the table's contents, and the sg_table itself must be freed by
180 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
182 struct sg_table
*mtk_gem_prime_get_sg_table(struct drm_gem_object
*obj
)
184 struct mtk_drm_gem_obj
*mtk_gem
= to_mtk_gem_obj(obj
);
185 struct mtk_drm_private
*priv
= obj
->dev
->dev_private
;
186 struct sg_table
*sgt
;
189 sgt
= kzalloc(sizeof(*sgt
), GFP_KERNEL
);
191 return ERR_PTR(-ENOMEM
);
193 ret
= dma_get_sgtable_attrs(priv
->dma_dev
, sgt
, mtk_gem
->cookie
,
194 mtk_gem
->dma_addr
, obj
->size
,
197 DRM_ERROR("failed to allocate sgt, %d\n", ret
);
205 struct drm_gem_object
*mtk_gem_prime_import_sg_table(struct drm_device
*dev
,
206 struct dma_buf_attachment
*attach
, struct sg_table
*sg
)
208 struct mtk_drm_gem_obj
*mtk_gem
;
210 /* check if the entries in the sg_table are contiguous */
211 if (drm_prime_get_contiguous_size(sg
) < attach
->dmabuf
->size
) {
212 DRM_ERROR("sg_table is not contiguous");
213 return ERR_PTR(-EINVAL
);
216 mtk_gem
= mtk_drm_gem_init(dev
, attach
->dmabuf
->size
);
218 return ERR_CAST(mtk_gem
);
220 mtk_gem
->dma_addr
= sg_dma_address(sg
->sgl
);
223 return &mtk_gem
->base
;
226 int mtk_drm_gem_prime_vmap(struct drm_gem_object
*obj
, struct iosys_map
*map
)
228 struct mtk_drm_gem_obj
*mtk_gem
= to_mtk_gem_obj(obj
);
229 struct sg_table
*sgt
= NULL
;
235 sgt
= mtk_gem_prime_get_sg_table(obj
);
239 npages
= obj
->size
>> PAGE_SHIFT
;
240 mtk_gem
->pages
= kcalloc(npages
, sizeof(*mtk_gem
->pages
), GFP_KERNEL
);
241 if (!mtk_gem
->pages
) {
246 drm_prime_sg_to_page_array(sgt
, mtk_gem
->pages
, npages
);
248 mtk_gem
->kvaddr
= vmap(mtk_gem
->pages
, npages
, VM_MAP
,
249 pgprot_writecombine(PAGE_KERNEL
));
250 if (!mtk_gem
->kvaddr
) {
252 kfree(mtk_gem
->pages
);
257 iosys_map_set_vaddr(map
, mtk_gem
->kvaddr
);
262 void mtk_drm_gem_prime_vunmap(struct drm_gem_object
*obj
,
263 struct iosys_map
*map
)
265 struct mtk_drm_gem_obj
*mtk_gem
= to_mtk_gem_obj(obj
);
266 void *vaddr
= map
->vaddr
;
272 mtk_gem
->kvaddr
= NULL
;
273 kfree(mtk_gem
->pages
);