/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu_cs.h"
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"

static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	unsigned long size;
	int r;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &bo->tbo;
	/* One for TTM and two for the CS job */
	p->uf_entry.tv.num_shared = 3;

	drm_gem_object_put(gobj);

	size = amdgpu_bo_size(bo);
	if (size != PAGE_SIZE || (data->offset + 8) > size) {
		r = -EINVAL;
		goto error_unref;
	}

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		r = -EINVAL;
		goto error_unref;
	}

	*offset = data->offset;

	return 0;

error_unref:
	amdgpu_bo_unref(&bo);
	return r;
}
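
/* Note on the checks above: the user fence is a 64-bit sequence number the
 * GPU writes at data->offset inside this BO, which is why the buffer must be
 * exactly one page with at least eight bytes of room at that offset, and why
 * userptr-backed BOs are rejected.
 */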

static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_bo_list_in *data)
{
	int r;
	struct drm_amdgpu_bo_list_entry *info = NULL;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	kvfree(info);

	return r;
}

static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return -EINVAL;

	chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	mutex_lock(&p->ctx->lock);

	/* skip guilty context job */
	if (atomic_read(&p->ctx->guilty) == 1) {
		ret = -ECANCELED;
		goto free_chunk;
	}

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
			    GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			size = sizeof(struct drm_amdgpu_bo_list_in);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.tv.bo)
		p->job->uf_addr = uf_offset;
	kvfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kvfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kvfree(chunk_array);

	return ret;
}
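
/* For reference, the ioctl argument parsed above is a two-level structure
 * built by userspace; libdrm's amdgpu_cs_submit_raw(), for example, ends up
 * producing something along these lines (illustrative sketch):
 *
 *	struct drm_amdgpu_cs_chunk chunks[2] = {
 *		{ .chunk_id = AMDGPU_CHUNK_ID_IB,
 *		  .length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4,
 *		  .chunk_data = (uintptr_t)&ib_info },
 *		{ .chunk_id = AMDGPU_CHUNK_ID_FENCE,
 *		  .length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4,
 *		  .chunk_data = (uintptr_t)&fence_info },
 *	};
 *	uint64_t ptrs[2] = { (uintptr_t)&chunks[0], (uintptr_t)&chunks[1] };
 *	cs.in.num_chunks = 2;
 *	cs.in.chunks = (uintptr_t)ptrs;
 *
 * i.e. cs->in.chunks points to an array of user pointers, each of which in
 * turn points to one struct drm_amdgpu_cs_chunk; that is why the code above
 * needs two rounds of copy_from_user() before it can copy in any kdata.
 */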

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}
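
/* A concrete illustration with made-up numbers: if log2_max_MBps == 6 the
 * link is rated at 2^6 = 64 MB/s. One second accumulates 1000000 us, and
 * us_to_bytes() then yields 1000000 << 6 = 64000000 bytes, i.e. exactly the
 * rated 64 MB (with MB taken as 10^6 bytes). With the 200 ms upper bound
 * applied below, a single submission can charge at most 200000 << 6 = 12.8 MB
 * worth of migrations before going into debt.
 */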

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;
	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}
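
/* The two helpers above form a closed loop with amdgpu_cs_parser_bos(): the
 * threshold pair is sampled once per submission, amdgpu_cs_bo_validate()
 * charges every ttm_bo_validate() move against it via p->bytes_moved, and
 * the total is then paid back through amdgpu_cs_report_moved_bytes(), which
 * may drive accum_us negative -- the "debt" described above.
 */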

static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_cs_parser *p = param;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.base.resv
	};
	uint32_t domain;
	int r;

	if (bo->tbo.pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold &&
	    (!bo->tbo.base.dma_buf ||
	    list_empty(&bo->tbo.base.dma_buf->attachments))) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
		    lobj->user_invalidated && lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;

			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
		}

		r = amdgpu_cs_bo_validate(p, bo);
		if (r)
			return r;

		kvfree(lobj->user_pages);
		lobj->user_pages = NULL;
	}
	return 0;
}

static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	struct amdgpu_bo *gds;
	struct amdgpu_bo *gws;
	struct amdgpu_bo *oa;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	mutex_lock(&p->bo_list->bo_list_mutex);

	/* One for TTM and one for the CS job */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 2;

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	/* Get userptr backing pages. If pages are updated after registered
	 * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
	 * amdgpu_ttm_backend_bind() to flush and invalidate new pages
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		bool userpage_invalidated = false;
		int i;

		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
					       sizeof(struct page *),
					       GFP_KERNEL | __GFP_ZERO);
		if (!e->user_pages) {
			DRM_ERROR("kvmalloc_array failure\n");
			r = -ENOMEM;
			goto out_free_user_pages;
		}

		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
		if (r) {
			kvfree(e->user_pages);
			e->user_pages = NULL;
			goto out_free_user_pages;
		}

		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
				userpage_invalidated = true;
				break;
			}
		}
		e->user_invalidated = userpage_invalidated;
	}

	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
				   &duplicates);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
		goto out_free_user_pages;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		e->bo_va = amdgpu_vm_bo_find(vm, bo);
	}

	/* Move fence waiting after getting reservation lock of
	 * PD root. Then there is no need on a ctx mutex lock.
	 */
	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entity);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
		goto error_validate;
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_bo_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r)
		goto error_validate;

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r)
		goto error_validate;

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	gds = p->bo_list->gds_obj;
	gws = p->bo_list->gws_obj;
	oa = p->bo_list->oa_obj;

	if (gds) {
		p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}

	if (!r && p->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

out_free_user_pages:
	if (r) {
		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			if (!e->user_pages)
				continue;
			amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
			kvfree(e->user_pages);
			e->user_pages = NULL;
		}
		mutex_unlock(&p->bo_list->bo_list_mutex);
	}
	return r;
}

static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		struct dma_resv *resv = bo->tbo.base.resv;
		enum amdgpu_sync_mode sync_mode;

		sync_mode = amdgpu_bo_explicit_sync(bo) ?
			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
				     &fpriv->vm);
		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: indicator to backoff the reservation
 *
 * If error is set, the buffers are unvalidated; otherwise just free the
 * memory used by the parsing context.
 */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
		mutex_unlock(&parser->bo_list->bo_list_mutex);
	}

	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);
		kfree(parser->post_deps[i].chain);
	}
	kfree(parser->post_deps);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kvfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	if (parser->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

		amdgpu_bo_unref(&uf);
	}
}

static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			uint64_t offset, va_start;
			struct amdgpu_ib *ib;
			uint8_t *kptr;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
				continue;

			va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
			if (r) {
				DRM_ERROR("IB va_start is invalid\n");
				return r;
			}

			if ((va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r) {
				return r;
			}

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += va_start - offset;

			if (ring->funcs->parse_cs) {
				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
				amdgpu_bo_kunmap(aobj);

				r = amdgpu_ring_parse_cs(ring, p, p->job, ib);
				if (r)
					return r;
			} else {
				ib->ptr = (uint32_t *)kptr;
				r = amdgpu_ring_patch_cs_in_place(ring, p, p->job, ib);
				amdgpu_bo_kunmap(aobj);
				if (r)
					return r;
			}

			j++;
		}
	}

	if (!p->job->vm)
		return amdgpu_cs_sync_rings(p);

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
	if (r)
		return r;

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		/* ignore duplicates */
		bo = ttm_to_amdgpu_bo(e->tv.bo);
		if (!bo)
			continue;

		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(&p->job->sync, vm->last_update);
	if (r)
		return r;

	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return amdgpu_cs_sync_rings(p);
}

static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int r, ce_preempt = 0, de_preempt = 0;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct drm_sched_entity *entity;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
		    (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
					  chunk_ib->ip_instance, chunk_ib->ring,
					  &entity);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
			parser->job->preamble_status |=
				AMDGPU_PREAMBLE_IB_PRESENT;

		if (parser->entity && parser->entity != entity)
			return -EINVAL;

		/* Return if there is no run queue associated with this entity.
		 * Possibly because of disabled HW IP.
		 */
		if (entity->rq == NULL)
			return -EINVAL;

		parser->entity = entity;

		ring = to_amdgpu_ring(entity->rq->sched);
		r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
				  chunk_ib->ib_bytes : 0,
				  AMDGPU_IB_POOL_DELAYED, ib);
		if (r) {
			DRM_ERROR("Failed to get ib !\n");
			return r;
		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;

		j++;
	}

	/* MM engine doesn't support user fences */
	ring = to_amdgpu_ring(parser->entity->rq->sched);
	if (parser->job->uf_addr && ring->funcs->no_user_fence)
		return -EINVAL;

	return 0;
}

static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_dep *deps;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
		amdgpu_ctx_put(ctx);

		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
			struct drm_sched_fence *s_fence;
			struct dma_fence *old = fence;

			s_fence = to_drm_sched_fence(fence);
			fence = dma_fence_get(&s_fence->scheduled);
			dma_fence_put(old);
		}

		r = amdgpu_sync_fence(&p->job->sync, fence);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}
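
/* A drm_sched fence exposes two stages: "scheduled" (the job has been picked
 * up and pushed to the ring) and "finished" (the job has completed). For
 * AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES the code above deliberately swaps
 * in &s_fence->scheduled, so the new job only waits for the dependency to
 * reach the hardware instead of fully serializing behind it.
 */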

static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
						 uint32_t handle, u64 point,
						 u64 flags)
{
	struct dma_fence *fence;
	int r;

	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
	if (r) {
		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
			  handle, point, r);
		return r;
	}

	r = amdgpu_sync_fence(&p->job->sync, fence);
	dma_fence_put(fence);

	return r;
}

static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps;
	unsigned num_deps;
	int i, r;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
							  0, 0);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
						     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;
	int i, r;

	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p,
							  syncobj_deps[i].handle,
							  syncobj_deps[i].point,
							  syncobj_deps[i].flags);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps;
	unsigned num_deps;
	int i;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_deps[i].syncobj =
			drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_deps[i].syncobj)
			return -EINVAL;
		p->post_deps[i].chain = NULL;
		p->post_deps[i].point = 0;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
						      struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;
	int i;

	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

		dep->chain = NULL;
		if (syncobj_deps[i].point) {
			dep->chain = dma_fence_chain_alloc();
			if (!dep->chain)
				return -ENOMEM;
		}

		dep->syncobj = drm_syncobj_find(p->filp,
						syncobj_deps[i].handle);
		if (!dep->syncobj) {
			dma_fence_chain_free(dep->chain);
			return -EINVAL;
		}
		dep->point = syncobj_deps[i].point;
		p->num_post_deps++;
	}

	return 0;
}
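
/* Binary vs. timeline syncobjs in the two functions above: a plain
 * AMDGPU_CHUNK_ID_SYNCOBJ_OUT entry simply has its fence replaced on signal
 * (chain == NULL, point == 0), while a timeline entry with a non-zero point
 * needs a dma_fence_chain node allocated up front so the point can be
 * appended atomically in amdgpu_cs_post_dependencies() below.
 */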

static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r = 0;

	/* TODO: Investigate why we still need the context lock */
	mutex_unlock(&p->ctx->lock);

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		switch (chunk->chunk_id) {
		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				goto out;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
			if (r)
				goto out;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
			if (r)
				goto out;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
			r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
			if (r)
				goto out;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
			if (r)
				goto out;
			break;
		}
	}

out:
	mutex_lock(&p->ctx->lock);
	return r;
}

static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_deps; ++i) {
		if (p->post_deps[i].chain && p->post_deps[i].point) {
			drm_syncobj_add_point(p->post_deps[i].syncobj,
					      p->post_deps[i].chain,
					      p->fence, p->post_deps[i].point);
			p->post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
						  p->fence);
		}
	}
}

static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_sched_entity *entity = p->entity;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_job *job;
	uint64_t seq;
	int r;

	job = p->job;
	p->job = NULL;

	r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
	if (r)
		goto error_unlock;

	drm_sched_job_arm(&job->base);

	/* No memory allocation is allowed while holding the notifier lock.
	 * The lock is held until amdgpu_cs_submit is finished and fence is
	 * added to BOs.
	 */
	mutex_lock(&p->adev->notifier_lock);

	/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
	 * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
	}
	if (r) {
		r = -EAGAIN;
		goto error_abort;
	}

	p->fence = dma_fence_get(&job->base.s_fence->finished);

	seq = amdgpu_ctx_add_fence(p->ctx, entity, p->fence);
	amdgpu_cs_post_dependencies(p);

	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);

	trace_amdgpu_cs_ioctl(job);
	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
	drm_sched_entity_push_job(&job->base);

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

	/* Make sure all BOs are remembered as writers */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 0;

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	mutex_unlock(&p->adev->notifier_lock);
	mutex_unlock(&p->bo_list->bo_list_mutex);

	return 0;

error_abort:
	drm_sched_job_cleanup(&job->base);
	mutex_unlock(&p->adev->notifier_lock);

error_unlock:
	amdgpu_job_free(job);
	return r;
}

static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
{
	int i;

	if (!trace_amdgpu_cs_enabled())
		return;

	for (i = 0; i < parser->job->num_ibs; i++)
		trace_amdgpu_cs(parser, i);
}
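
/* Overall flow of a submission, as driven by amdgpu_cs_ioctl() below:
 * parser_init() copies in the chunk array, ib_fill() picks the scheduler
 * entity and allocates the IBs, dependencies() gathers the fences to wait
 * on, parser_bos() reserves and validates the buffer list, vm_handling()
 * updates the page tables, and submit() finally hands the armed job to the
 * DRM scheduler.
 */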

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int r;

	if (amdgpu_ras_intr_triggered())
		return -EHWPOISON;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		if (printk_ratelimit())
			DRM_ERROR("Failed to initialize parser %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS && r != -EAGAIN)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;

	trace_amdgpu_cs_ibs(&parser);

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);

	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (!fence)
		fence = dma_fence_get_stub();

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		dma_fence_put(fence);
		return -EINVAL;
	}
}
1512 | ||
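/*
 * Example (userspace sketch, assumed handles): exporting the fence of an
 * earlier submission as a sync_file fd that can be polled or handed to
 * another process or driver; on success the caller owns sync_fd:
 *
 *	union drm_amdgpu_fence_to_handle fth = {};
 *
 *	fth.in.fence.ctx_id = ctx_id;
 *	fth.in.fence.ip_type = AMDGPU_HW_IP_GFX;
 *	fth.in.fence.seq_no = seq_no;
 *	fth.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;
 *	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &fth) == 0)
 *		sync_fd = (int)fth.out.handle;
 */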
eef18a82 | 1513 | /** |
3bffd71d | 1514 | * amdgpu_cs_wait_all_fences - wait on all fences to signal |
eef18a82 JZ |
1515 | * |
1516 | * @adev: amdgpu device | |
1517 | * @filp: file private | |
1518 | * @wait: wait parameters | |
1519 | * @fences: array of drm_amdgpu_fence | |
1520 | */ | |
1521 | static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev, | |
1522 | struct drm_file *filp, | |
1523 | union drm_amdgpu_wait_fences *wait, | |
1524 | struct drm_amdgpu_fence *fences) | |
1525 | { | |
1526 | uint32_t fence_count = wait->in.fence_count; | |
1527 | unsigned int i; | |
1528 | long r = 1; | |
1529 | ||
1530 | for (i = 0; i < fence_count; i++) { | |
1531 | struct dma_fence *fence; | |
1532 | unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns); | |
1533 | ||
1534 | fence = amdgpu_cs_get_fence(adev, filp, &fences[i]); | |
1535 | if (IS_ERR(fence)) | |
1536 | return PTR_ERR(fence); | |
1537 | else if (!fence) | |
1538 | continue; | |
1539 | ||
1540 | r = dma_fence_wait_timeout(fence, true, timeout); |
7a0a48dd CK |
1541 | if (r > 0 && fence->error) |
1542 | r = fence->error; |
32df87df | 1543 | dma_fence_put(fence); |
eef18a82 JZ |
1544 | ||
1545 | if (r < 0) |
1546 | return r; |
1547 | ||
1548 | if (r == 0) |
1549 | break; |
eef18a82 JZ |
1550 | } |
1551 | ||
1552 | memset(wait, 0, sizeof(*wait)); | |
1553 | wait->out.status = (r > 0); | |
1554 | ||
1555 | return 0; | |
1556 | } | |
1557 | ||
1558 | /** | |
1559 | * amdgpu_cs_wait_any_fence - wait on any fence to signal | |
1560 | * | |
1561 | * @adev: amdgpu device | |
1562 | * @filp: file private | |
1563 | * @wait: wait parameters | |
1564 | * @fences: array of drm_amdgpu_fence | |
1565 | */ | |
1566 | static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev, | |
1567 | struct drm_file *filp, | |
1568 | union drm_amdgpu_wait_fences *wait, | |
1569 | struct drm_amdgpu_fence *fences) | |
1570 | { | |
1571 | unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns); | |
1572 | uint32_t fence_count = wait->in.fence_count; | |
1573 | uint32_t first = ~0; | |
1574 | struct dma_fence **array; | |
1575 | unsigned int i; | |
1576 | long r; | |
1577 | ||
1578 | /* Prepare the fence array */ | |
1579 | array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL); | |
1580 | ||
1581 | if (array == NULL) | |
1582 | return -ENOMEM; | |
1583 | ||
1584 | for (i = 0; i < fence_count; i++) { | |
1585 | struct dma_fence *fence; | |
1586 | ||
1587 | fence = amdgpu_cs_get_fence(adev, filp, &fences[i]); | |
1588 | if (IS_ERR(fence)) { | |
1589 | r = PTR_ERR(fence); | |
1590 | goto err_free_fence_array; | |
1591 | } else if (fence) { | |
1592 | array[i] = fence; | |
1593 | } else { /* NULL, the fence has been already signaled */ | |
1594 | r = 1; | |
a2138eaf | 1595 | first = i; |
eef18a82 JZ |
1596 | goto out; |
1597 | } | |
1598 | } | |
1599 | ||
1600 | r = dma_fence_wait_any_timeout(array, fence_count, true, timeout, | |
1601 | &first); | |
1602 | if (r < 0) | |
1603 | goto err_free_fence_array; | |
1604 | ||
1605 | out: | |
1606 | memset(wait, 0, sizeof(*wait)); | |
1607 | wait->out.status = (r > 0); | |
1608 | wait->out.first_signaled = first; | |
cdadab89 | 1609 | |
eb174c77 | 1610 | if (first < fence_count && array[first]) |
cdadab89 ED |
1611 | r = array[first]->error; |
1612 | else | |
1613 | r = 0; | |
eef18a82 JZ |
1614 | |
1615 | err_free_fence_array: | |
1616 | for (i = 0; i < fence_count; i++) | |
1617 | dma_fence_put(array[i]); | |
1618 | kfree(array); | |
1619 | ||
1620 | return r; | |
1621 | } | |
1622 | ||
1623 | /** | |
1624 | * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish | |
1625 | * | |
1626 | * @dev: drm device | |
1627 | * @data: data from userspace | |
1628 | * @filp: file private | |
1629 | */ | |
1630 | int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, | |
1631 | struct drm_file *filp) | |
1632 | { | |
1348969a | 1633 | struct amdgpu_device *adev = drm_to_adev(dev); |
eef18a82 JZ |
1634 | union drm_amdgpu_wait_fences *wait = data; |
1635 | uint32_t fence_count = wait->in.fence_count; | |
1636 | struct drm_amdgpu_fence *fences_user; | |
1637 | struct drm_amdgpu_fence *fences; | |
1638 | int r; | |
1639 | ||
1640 | /* Get the fences from userspace */ | |
1641 | fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence), | |
1642 | GFP_KERNEL); | |
1643 | if (fences == NULL) | |
1644 | return -ENOMEM; | |
1645 | ||
7ecc245a | 1646 | fences_user = u64_to_user_ptr(wait->in.fences); |
eef18a82 JZ |
1647 | if (copy_from_user(fences, fences_user, |
1648 | sizeof(struct drm_amdgpu_fence) * fence_count)) { | |
1649 | r = -EFAULT; | |
1650 | goto err_free_fences; | |
1651 | } | |
1652 | ||
1653 | if (wait->in.wait_all) | |
1654 | r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences); | |
1655 | else | |
1656 | r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences); | |
1657 | ||
1658 | err_free_fences: | |
1659 | kfree(fences); | |
1660 | ||
1661 | return r; | |
1662 | } | |
1663 | ||
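/*
 * Example (userspace sketch, assumed values): waiting until the first of
 * two earlier submissions signals. The fences array is filled like
 * fth.in.fence in the sketch above; with wait_all == 0 the ioctl returns
 * as soon as any fence signals and reports its index in first_signaled,
 * and out.status is non-zero when a fence signaled before the timeout:
 *
 *	struct drm_amdgpu_fence fences[2];
 *	union drm_amdgpu_wait_fences wait = {};
 *
 *	wait.in.fences = (__u64)(uintptr_t)fences;
 *	wait.in.fence_count = 2;
 *	wait.in.wait_all = 0;
 *	wait.in.timeout_ns = timeout_ns;
 *	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &wait) == 0 &&
 *	    wait.out.status)
 *		first = wait.out.first_signaled;
 */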
d38ceaf9 | 1664 | /** |
3bffd71d | 1665 | * amdgpu_cs_find_mapping - find bo_va for VM address |
d38ceaf9 AD |
1666 | * |
1667 | * @parser: command submission parser context | |
1668 | * @addr: VM address | |
1669 | * @bo: resulting BO of the mapping found | |
fec3124d | 1670 | * @map: Placeholder to return found BO mapping |
d38ceaf9 AD |
1671 | * |
1672 | * Search the buffer objects in the command submission context for a certain | |
1673 | * virtual memory address. Returns 0 and fills in @bo and @map when the | |
1674 | * mapping is found, a negative errno otherwise. | |
1675 | */ | |
9cca0b8e CK |
1676 | int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, |
1677 | uint64_t addr, struct amdgpu_bo **bo, | |
1678 | struct amdgpu_bo_va_mapping **map) | |
d38ceaf9 | 1679 | { |
aebc5e6f | 1680 | struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; |
19be5570 | 1681 | struct ttm_operation_ctx ctx = { false, false }; |
aebc5e6f | 1682 | struct amdgpu_vm *vm = &fpriv->vm; |
d38ceaf9 | 1683 | struct amdgpu_bo_va_mapping *mapping; |
c855e250 CK |
1684 | int r; |
1685 | ||
d38ceaf9 | 1686 | addr /= AMDGPU_GPU_PAGE_SIZE; |
c855e250 | 1687 | |
aebc5e6f CK |
1688 | mapping = amdgpu_vm_bo_lookup_mapping(vm, addr); |
1689 | if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo) | |
1690 | return -EINVAL; | |
c855e250 | 1691 | |
aebc5e6f CK |
1692 | *bo = mapping->bo_va->base.bo; |
1693 | *map = mapping; | |
03f48dd5 | 1694 | |
aebc5e6f | 1695 | /* Double check that the BO is reserved by this CS */ |
52791eee | 1696 | if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket) |
aebc5e6f | 1697 | return -EINVAL; |
03f48dd5 | 1698 | |
4b6b691e CK |
1699 | if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) { |
1700 | (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; | |
c704ab18 | 1701 | amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains); |
19be5570 | 1702 | r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx); |
4b6b691e | 1703 | if (r) |
03f48dd5 | 1704 | return r; |
c855e250 CK |
1705 | } |
1706 | ||
c5835bbb | 1707 | return amdgpu_ttm_alloc_gart(&(*bo)->tbo); |
c855e250 | 1708 | } |
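/*
 * Example (in-kernel sketch, hypothetical caller): command stream parsers
 * such as the UVD/VCE IB checkers resolve an address taken from an IB
 * roughly like this; on success the BO has been validated and bound to
 * GART, and map->start/map->last bound the mapping in GPU pages:
 *
 *	struct amdgpu_bo_va_mapping *map;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	r = amdgpu_cs_find_mapping(parser, addr, &bo, &map);
 *	if (r)
 *		return r;
 */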