// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

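/* Lazily attach backing pages on first use: shmem pages when we have an
 * IOMMU, otherwise pages from the VRAM carveout. Also constructs the
 * scatter/gather table for the buffer.
 */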
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					     msm_obj->sgt->nents,
					     DMA_BIDIRECTIONAL);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

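/* Set up a userspace mapping according to the buffer's caching flags:
 * write-combined, uncached, or (for cached buffers) backed directly by
 * the shmem file so unmap_mapping_range() behaves as expected.
 */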
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

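/* Fault handler for userspace mappings: pin the backing pages and
 * insert the pfn for the faulting address into the vma.
 */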
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

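/* A buffer can be mapped into multiple address spaces; each mapping is
 * tracked as a msm_gem_vma on the object's vmas list, protected by
 * msm_obj->lock.
 */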
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock held */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_purge_vma(vma->aspace, vma);
		msm_gem_close_vma(vma->aspace, vma);
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are
 * currently valid for the life of the object.
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't
 * actually purged until something else (shrinker, mm_notifier,
 * destroy, etc) decides to get rid of it.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

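/* Map the buffer into kernel address space, pinning its pages. The
 * vmap is created on first use and cached until the object is purged
 * or freed.
 */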
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

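/* Release the backing store of a purgeable (MSM_MADV_DONTNEED) object:
 * drop its iovas, kernel mapping, and pages, and truncate the shmem
 * backing file so the memory is returned to the system immediately.
 * Typically called from the shrinker under memory pressure.
 */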
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(obj->resv, fence);
	else
		reservation_object_add_shared_fence(obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

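/* Wait for pending GPU access to finish before CPU access: waits on the
 * exclusive fence, and also on all shared fences when preparing for
 * write. MSM_PREP_NOSYNC turns this into a non-blocking check.
 */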
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
						  true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
				vma->aspace != NULL ? vma->aspace->name : NULL,
				vma->iova, vma->mapped ? "mapped" : "unmapped",
				vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

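/* Deferred free: objects queued on priv->free_list by
 * msm_gem_free_object() are destroyed here, under struct_mutex, on the
 * driver workqueue.
 */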
void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

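/* Common initialization shared by the allocation and dma-buf import
 * paths: validates caching flags, allocates the msm_gem_object, and
 * puts it on the inactive list.
 */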
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv)
		msm_obj->base.resv = resv;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

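/* Import a dma-buf: wrap the sg_table provided by the exporter in a GEM
 * object; no new backing pages are allocated.
 */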
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

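/* Allocate a buffer, pin an iova for it in @aspace (if @iova is given),
 * and map it into kernel address space, all in one call.
 */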
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}