// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include "xen_drm_front_gem.h"

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <xen/balloon.h>

#include "xen_drm_front.h"

struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* this is for imported PRIME buffer */
	struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

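/*
 * Allocate the array of struct page pointers that tracks the buffer's
 * backing pages; gem_free_pages_array() releases that array again.
 */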
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}

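/*
 * Create a GEM object of the given size. If the backend allocates the
 * buffer (cfg.be_alloc), only ballooned pages are reserved here so that
 * grant references provided by the backend can later be mapped into them;
 * otherwise backing pages are allocated locally via drm_gem_get_pages()
 * and shared with the backend.
 */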
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = alloc_xenballooned_pages(xen_obj->num_pages,
					       xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}
	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR_OR_NULL(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}

struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}

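/*
 * Release a GEM object: imported PRIME buffers are torn down through
 * drm_prime_gem_destroy(), backend-allocated buffers return their ballooned
 * pages, and locally allocated buffers are put back via drm_gem_put_pages().
 */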
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				free_xenballooned_pages(xen_obj->num_pages,
							xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
}

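/*
 * Import a PRIME buffer: wrap the dma-buf's sg_table in a GEM object,
 * collect its backing pages and register the buffer with the backend via
 * xen_drm_front_dbuf_create().
 */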
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
					       NULL, xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->nents);

	return &xen_obj->base;
}

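/*
 * Map the buffer's pages into the given VMA. All pages are inserted
 * up front with vm_insert_page(), so no .fault handler is needed.
 */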
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
			struct vm_area_struct *vma)
{
	unsigned long addr = vma->vm_start;
	int i;

	/*
	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_pgoff = 0;
	vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/*
	 * The vm_operations_struct.fault handler would normally be called on
	 * CPU access to the VMA. For GPUs this isn't the case, because the
	 * CPU doesn't touch the memory. Insert the pages now, so both CPU and
	 * GPU are happy.
	 * FIXME: as all the pages are inserted here, no .fault handler should
	 * ever be called, so one is not provided.
	 */
	for (i = 0; i < xen_obj->num_pages; i++) {
		int ret;

		ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
		if (ret < 0) {
			DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
			return ret;
		}

		addr += PAGE_SIZE;
	}
	return 0;
}

int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret < 0)
		return ret;

	gem_obj = vma->vm_private_data;
	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}

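/*
 * Create a write-combined kernel virtual mapping of the buffer for PRIME
 * vmap; the matching vunmap callback simply tears the mapping down again.
 */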
void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return NULL;

	return vmap(xen_obj->pages, xen_obj->num_pages,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    void *vaddr)
{
	vunmap(vaddr);
}

int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
				 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	int ret;

	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
	if (ret < 0)
		return ret;

	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}