/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
/**
 * DOC: GPUVM
 *
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
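
/*
 * Illustration (assumed example numbers, not tied to a specific asic):
 * with a 48-bit virtual address, a 9-bit block_size and
 * root_level == AMDGPU_VM_PDB2, the walker consumes the address as
 *
 *	va[47:39] -> PDB2 index, va[38:30] -> PDB1 index,
 *	va[29:21] -> PDB0 index, va[20:12] -> PTB index, va[11:0] -> offset
 *
 * which matches the per-level shifts computed by amdgpu_vm_level_shift()
 * below.
 */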
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST
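
/*
 * The INTERVAL_TREE_DEFINE() above generates the static helpers
 * amdgpu_vm_it_insert() and amdgpu_vm_it_iter_first() used further down,
 * e.g. for the overlap check in amdgpu_vm_bo_map():
 *
 *	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
 *	if (tmp)
 *		... [saddr, eaddr] overlaps an existing mapping ...
 */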
/**
 * struct amdgpu_pte_update_params - Local structure
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_pte_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @src: address where to copy page table entries from
	 */
	uint64_t src;

	/**
	 * @ib: indirect buffer to fill with commands
	 */
	struct amdgpu_ib *ib;

	/**
	 * @func: Function which actually does the update
	 */
	void (*func)(struct amdgpu_pte_update_params *params,
		     struct amdgpu_bo *bo, uint64_t pe,
		     uint64_t addr, unsigned count, uint32_t incr,
		     uint64_t flags);
	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping, used during VM update by CPU
	 */
	dma_addr_t *pages_addr;

	/**
	 * @kptr:
	 *
	 * Kernel pointer of PD/PT BO that needs to be updated,
	 * used during VM update by CPU
	 */
	void *kptr;
};
/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

	/**
	 * @adev: amdgpu device
	 */
	struct amdgpu_device *adev;

	/**
	 * @cb: callback
	 */
	struct dma_fence_cb cb;
};
/**
 * amdgpu_vm_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
 */
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = 0xff;

	switch (level) {
	case AMDGPU_VM_PDB2:
	case AMDGPU_VM_PDB1:
	case AMDGPU_VM_PDB0:
		shift = 9 * (AMDGPU_VM_PDB0 - level) +
			adev->vm_manager.block_size;
		break;
	case AMDGPU_VM_PTB:
		shift = 0;
		break;
	default:
		dev_err(adev->dev, "the level%d isn't supported.\n", level);
	}

	return shift;
}
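
/*
 * Worked example (illustrative, assuming block_size == 9 on a four-level
 * configuration): amdgpu_vm_level_shift() yields 27 for AMDGPU_VM_PDB2,
 * 18 for PDB1, 9 for PDB0 and 0 for AMDGPU_VM_PTB, i.e. each directory
 * level covers 9 more bits of the page frame number than the level below.
 */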
/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = amdgpu_vm_level_shift(adev,
					       adev->vm_manager.root_level);

	if (level == adev->vm_manager.root_level)
		/* For the root directory */
		return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
	else if (level != AMDGPU_VM_PTB)
		/* Everything in between */
		return 512;
	else
		/* For the page tables on the leaves */
		return AMDGPU_VM_PTE_COUNT(adev);
}
/**
 * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The mask to extract the entry number of a PD/PT from an address.
 */
static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
				       unsigned int level)
{
	if (level <= adev->vm_manager.root_level)
		return 0xffffffff;
	else if (level != AMDGPU_VM_PTB)
		return 0x1ff;
	else
		return AMDGPU_VM_PTE_COUNT(adev) - 1;
}
/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}
/**
 * amdgpu_vm_bo_evicted - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for PDs/PTs and per VM BOs which are not at the location they should
 * be.
 */
static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
{
	struct amdgpu_vm *vm = vm_bo->vm;
	struct amdgpu_bo *bo = vm_bo->bo;

	vm_bo->moved = true;
	if (bo->tbo.type == ttm_bo_type_kernel)
		list_move(&vm_bo->vm_status, &vm->evicted);
	else
		list_move_tail(&vm_bo->vm_status, &vm->evicted);
}
/**
 * amdgpu_vm_bo_relocated - vm_bo is relocated
 *
 * @vm_bo: vm_bo which is relocated
 *
 * State for PDs/PTs which need to update their parent PD.
 */
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
}
/**
 * amdgpu_vm_bo_moved - vm_bo is moved
 *
 * @vm_bo: vm_bo which is moved
 *
 * State for per VM BOs which are moved, but that change is not yet reflected
 * in the page tables.
 */
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
}
/**
 * amdgpu_vm_bo_idle - vm_bo is idle
 *
 * @vm_bo: vm_bo which is now idle
 *
 * State for PDs/PTs and per VM BOs which have gone through the state machine
 * and are now idle.
 */
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
	vm_bo->moved = false;
}
/**
 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 *
 * @vm_bo: vm_bo which is now invalidated
 *
 * State for normal BOs which are invalidated and whose change is not yet
 * reflected in the PTs.
 */
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->invalidated_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
	spin_unlock(&vm_bo->vm->invalidated_lock);
}
/**
 * amdgpu_vm_bo_done - vm_bo is done
 *
 * @vm_bo: vm_bo which is now done
 *
 * State for normal BOs which were invalidated and whose change has already
 * been updated in the PTs.
 */
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->invalidated_lock);
	list_del_init(&vm_bo->vm_status);
	spin_unlock(&vm_bo->vm->invalidated_lock);
}
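
/*
 * Rough sketch of the vm_bo state machine the helpers above implement
 * (transition points are approximate):
 *
 *	evicted     -> moved/relocated  via amdgpu_vm_validate_pt_bos()
 *	relocated   -> idle             once the parent PD entry is updated
 *	moved       -> idle or evicted  after amdgpu_vm_bo_update()
 *	invalidated -> done             once the BO's PTEs are updated
 */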
/**
 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 *
 * @base: base structure for tracking BO usage in a VM
 * @vm: vm to which bo is to be added
 * @bo: amdgpu buffer object
 *
 * Initialize a bo_va_base structure and add it to the appropriate lists
 *
 */
static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo *bo)
{
	base->vm = vm;
	base->bo = bo;
	base->next = NULL;
	INIT_LIST_HEAD(&base->vm_status);

	if (!bo)
		return;
	base->next = bo->vm_bo;
	bo->vm_bo = base;

	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
		return;

	vm->bulk_moveable = false;
	if (bo->tbo.type == ttm_bo_type_kernel)
		amdgpu_vm_bo_relocated(base);
	else
		amdgpu_vm_bo_idle(base);

	if (bo->preferred_domains &
	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
		return;

	/*
	 * we checked all the prerequisites, but it looks like this per vm bo
	 * is currently evicted. add the bo to the evicted list to make sure it
	 * is validated on next vm use to avoid fault.
	 */
	amdgpu_vm_bo_evicted(base);
}
/**
 * amdgpu_vm_pt_parent - get the parent page directory
 *
 * @pt: child page table
 *
 * Helper to get the parent entry for the child page table. NULL if we are at
 * the root page directory.
 */
static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
{
	struct amdgpu_bo *parent = pt->base.bo->parent;

	if (!parent)
		return NULL;

	return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
}
/**
 * struct amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
 */
struct amdgpu_vm_pt_cursor {
	uint64_t pfn;
	struct amdgpu_vm_pt *parent;
	struct amdgpu_vm_pt *entry;
	unsigned level;
};
/**
 * amdgpu_vm_pt_start - start PD/PT walk
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm structure
 * @start: start address of the walk
 * @cursor: state to initialize
 *
 * Initialize an amdgpu_vm_pt_cursor to start a walk.
 */
static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm, uint64_t start,
			       struct amdgpu_vm_pt_cursor *cursor)
{
	cursor->pfn = start;
	cursor->parent = NULL;
	cursor->entry = &vm->root;
	cursor->level = adev->vm_manager.root_level;
}
/**
 * amdgpu_vm_pt_descendant - go to child node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the child node of the current node.
 *
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
				    struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned mask, shift, idx;

	if (!cursor->entry->entries)
		return false;

	BUG_ON(!cursor->entry->base.bo);
	mask = amdgpu_vm_entries_mask(adev, cursor->level);
	shift = amdgpu_vm_level_shift(adev, cursor->level);

	++cursor->level;
	idx = (cursor->pfn >> shift) & mask;
	cursor->parent = cursor->entry;
	cursor->entry = &cursor->entry->entries[idx];
	return true;
}
/**
 * amdgpu_vm_pt_sibling - go to sibling node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the sibling node of the current node.
 *
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
				 struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned shift, num_entries;

	/* Root doesn't have a sibling */
	if (!cursor->parent)
		return false;

	/* Go to our parents and see if we got a sibling */
	shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
	num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);

	if (cursor->entry == &cursor->parent->entries[num_entries - 1])
		return false;

	cursor->pfn += 1ULL << shift;
	cursor->pfn &= ~((1ULL << shift) - 1);
	++cursor->entry;
	return true;
}
/**
 * amdgpu_vm_pt_ancestor - go to parent node
 *
 * @cursor: current state
 *
 * Walk to the parent node of the current node.
 *
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->parent)
		return false;

	--cursor->level;
	cursor->entry = cursor->parent;
	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
	return true;
}
/**
 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk the PD/PT tree to the next node.
 */
static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
			      struct amdgpu_vm_pt_cursor *cursor)
{
	/* First try a newborn child */
	if (amdgpu_vm_pt_descendant(adev, cursor))
		return;

	/* If that didn't work, try to find a sibling */
	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
		/* No sibling, go to our parents and grandparents */
		if (!amdgpu_vm_pt_ancestor(cursor)) {
			cursor->pfn = ~0ll;
			return;
		}
	}
}
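
/*
 * In other words, amdgpu_vm_pt_next() implements a pre-order walk: first
 * descend to a child, otherwise advance to a sibling, otherwise climb up
 * until a sibling exists; cursor->pfn is set to ~0ll once the walk is done.
 */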
/**
 * amdgpu_vm_pt_first_leaf - get first leaf PD/PT
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm structure
 * @start: start addr of the walk
 * @cursor: state to initialize
 *
 * Start a walk and go directly to the leaf node.
 */
static void amdgpu_vm_pt_first_leaf(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm, uint64_t start,
				    struct amdgpu_vm_pt_cursor *cursor)
{
	amdgpu_vm_pt_start(adev, vm, start, cursor);
	while (amdgpu_vm_pt_descendant(adev, cursor));
}
/**
 * amdgpu_vm_pt_next_leaf - get next leaf PD/PT
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk the PD/PT tree to the next leaf node.
 */
static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev,
				   struct amdgpu_vm_pt_cursor *cursor)
{
	amdgpu_vm_pt_next(adev, cursor);
	if (cursor->pfn != ~0ll)
		while (amdgpu_vm_pt_descendant(adev, cursor));
}
/**
 * for_each_amdgpu_vm_pt_leaf - walk over all leaf PDs/PTs in the hierarchy
 */
#define for_each_amdgpu_vm_pt_leaf(adev, vm, start, end, cursor)		\
	for (amdgpu_vm_pt_first_leaf((adev), (vm), (start), &(cursor));	\
	     (cursor).pfn <= end; amdgpu_vm_pt_next_leaf((adev), &(cursor)))
/**
 * amdgpu_vm_pt_first_dfs - start a depth first search
 *
 * @adev: amdgpu_device structure
 * @vm: amdgpu_vm structure
 * @cursor: state to initialize
 *
 * Starts a depth first traversal of the PD/PT tree.
 */
static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_vm_pt_cursor *cursor)
{
	amdgpu_vm_pt_start(adev, vm, 0, cursor);
	while (amdgpu_vm_pt_descendant(adev, cursor));
}
/**
 * amdgpu_vm_pt_next_dfs - get the next node for a depth first search
 *
 * @adev: amdgpu_device structure
 * @cursor: current state
 *
 * Move the cursor to the next node in a depth first search.
 */
static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->entry)
		return;

	if (!cursor->parent)
		cursor->entry = NULL;
	else if (amdgpu_vm_pt_sibling(adev, cursor))
		while (amdgpu_vm_pt_descendant(adev, cursor));
	else
		amdgpu_vm_pt_ancestor(cursor);
}
/**
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth first search of all PDs/PTs
 */
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry)		\
	for (amdgpu_vm_pt_first_dfs((adev), (vm), &(cursor)),			\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
	     (entry); (entry) = (cursor).entry,					\
	     amdgpu_vm_pt_next_dfs((adev), &(cursor)))
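
/*
 * Typical use of the DFS iterator (see amdgpu_vm_free_pts() below): the
 * cursor is advanced before the loop body runs, so the current entry can
 * be freed safely, e.g.
 *
 *	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry)
 *		kvfree(entry->entries);
 */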
/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->priority = 0;
	entry->tv.bo = &vm->root.base.bo->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}
/**
 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 *
 * Move all BOs to the end of LRU and remember their positions to put them
 * together.
 */
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;
	struct amdgpu_vm_bo_base *bo_base;

	if (vm->bulk_moveable) {
		spin_lock(&glob->lru_lock);
		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
		spin_unlock(&glob->lru_lock);
		return;
	}

	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo_base, &vm->idle, vm_status) {
		struct amdgpu_bo *bo = bo_base->bo;

		if (!bo->parent)
			continue;

		ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
		if (bo->shadow)
			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
						&vm->lru_bulk_move);
	}
	spin_unlock(&glob->lru_lock);

	vm->bulk_moveable = true;
}
/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 *
 * Returns:
 * Validation result.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
			      void *param)
{
	struct amdgpu_vm_bo_base *bo_base, *tmp;
	int r = 0;

	vm->bulk_moveable &= list_empty(&vm->evicted);

	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
		struct amdgpu_bo *bo = bo_base->bo;

		r = validate(param, bo);
		if (r)
			break;

		if (bo->tbo.type != ttm_bo_type_kernel) {
			amdgpu_vm_bo_moved(bo_base);
		} else {
			if (vm->use_cpu_for_update)
				r = amdgpu_bo_kmap(bo, NULL);
			else
				r = amdgpu_ttm_alloc_gart(&bo->tbo);
			if (r)
				break;
			if (bo->shadow) {
				r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
				if (r)
					break;
			}
			amdgpu_vm_bo_relocated(bo_base);
		}
	}

	return r;
}
/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 *
 * Returns:
 * True if eviction list is empty.
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
	return list_empty(&vm->evicted);
}
/**
 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear BO from
 * @bo: BO to clear
 * @level: level this BO is at
 * @pte_support_ats: indicate ATS support from PTE
 *
 * Root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm, struct amdgpu_bo *bo,
			      unsigned level, bool pte_support_ats)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct dma_fence *fence = NULL;
	unsigned entries, ats_entries;
	struct amdgpu_ring *ring;
	struct amdgpu_job *job;
	uint64_t addr;
	int r;

	entries = amdgpu_bo_size(bo) / 8;

	if (pte_support_ats) {
		if (level == adev->vm_manager.root_level) {
			ats_entries = amdgpu_vm_level_shift(adev, level);
			ats_entries += AMDGPU_GPU_PAGE_SHIFT;
			ats_entries = AMDGPU_GMC_HOLE_START >> ats_entries;
			ats_entries = min(ats_entries, entries);
			entries -= ats_entries;
		} else {
			ats_entries = entries;
			entries = 0;
		}
	} else {
		ats_entries = 0;
	}

	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);

	r = reservation_object_reserve_shared(bo->tbo.resv, 1);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		goto error;

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r)
		return r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto error;

	addr = amdgpu_bo_gpu_offset(bo);
	if (ats_entries) {
		uint64_t ats_value;

		ats_value = AMDGPU_PTE_DEFAULT_ATC;
		if (level != AMDGPU_VM_PTB)
			ats_value |= AMDGPU_PDE_PTE;

		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
				      ats_entries, 0, ats_value);
		addr += ats_entries * 8;
	}

	if (entries)
		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
				      entries, 0, 0);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);

	WARN_ON(job->ibs[0].length_dw > 64);
	r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
			     AMDGPU_FENCE_OWNER_UNDEFINED, false);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
			      &fence);
	if (r)
		goto error_free;

	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

	if (bo->shadow)
		return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
					  level, pte_support_ats);

	return 0;

error_free:
	amdgpu_job_free(job);

error:
	return r;
}
/**
 * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @bp: resulting BO allocation parameters
 */
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			       int level, struct amdgpu_bo_param *bp)
{
	memset(bp, 0, sizeof(*bp));

	bp->size = amdgpu_vm_bo_size(adev, level);
	bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
	bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
	if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
	    adev->flags & AMD_IS_APU)
		bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
	bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
	bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	if (vm->use_cpu_for_update)
		bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else if (!vm->root.base.bo || vm->root.base.bo->shadow)
		bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
	bp->type = ttm_bo_type_kernel;
	if (vm->root.base.bo)
		bp->resv = vm->root.base.bo->tbo.resv;
}
/**
 * amdgpu_vm_alloc_pts - Allocate page tables.
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @saddr: Start address which needs to be allocated
 * @size: Size from start address we need.
 *
 * Make sure the page directories and page tables are allocated
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_bo *pt;
	bool ats = false;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	eaddr = saddr + size - 1;

	if (vm->pte_support_ats)
		ats = saddr < AMDGPU_GMC_HOLE_START;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	if (eaddr >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
			eaddr, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	for_each_amdgpu_vm_pt_leaf(adev, vm, saddr, eaddr, cursor) {
		struct amdgpu_vm_pt *entry = cursor.entry;
		struct amdgpu_bo_param bp;

		if (cursor.level < AMDGPU_VM_PTB) {
			unsigned num_entries;

			num_entries = amdgpu_vm_num_entries(adev, cursor.level);
			entry->entries = kvmalloc_array(num_entries,
							sizeof(*entry->entries),
							GFP_KERNEL |
							__GFP_ZERO);
			if (!entry->entries)
				return -ENOMEM;
		}

		if (entry->base.bo)
			continue;

		amdgpu_vm_bo_param(adev, vm, cursor.level, &bp);

		r = amdgpu_bo_create(adev, &bp, &pt);
		if (r)
			return r;

		r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
		if (r)
			goto error_free_pt;

		if (vm->use_cpu_for_update) {
			r = amdgpu_bo_kmap(pt, NULL);
			if (r)
				goto error_free_pt;
		}

		/* Keep a reference to the root directory to avoid
		 * freeing them up in the wrong order.
		 */
		pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);

		amdgpu_vm_bo_base_init(&entry->base, vm, pt);
	}

	return 0;

error_free_pt:
	amdgpu_bo_unref(&pt->shadow);
	amdgpu_bo_unref(&pt);
	return r;
}
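
/*
 * Example call (illustrative values): to back a 2MB mapping at GPU VA
 * 0x100000 a caller would use
 *
 *	r = amdgpu_vm_alloc_pts(adev, vm, 0x100000, 0x200000);
 *
 * which walks the leaves covering pfns 0x100 through 0x2ff and allocates
 * any missing page directory/page table BOs along the way.
 */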
/**
 * amdgpu_vm_free_pts - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_pt *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) {

		if (entry->base.bo) {
			entry->base.bo->vm_bo = NULL;
			list_del(&entry->base.vm_status);
			amdgpu_bo_unref(&entry->base.bo->shadow);
			amdgpu_bo_unref(&entry->base.bo);
		}
		kvfree(entry->entries);
	}

	BUG_ON(vm->root.base.bo);
}
/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
{
	const struct amdgpu_ip_block *ip_block;
	bool has_compute_vm_bug;
	struct amdgpu_ring *ring;
	int i;

	has_compute_vm_bug = false;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		   Compute has a VM bug for GFX 8 MEC firmware version < 673. */
		if (ip_block->version->major <= 7)
			has_compute_vm_bug = true;
		else if (ip_block->version->major == 8)
			if (adev->gfx.mec_fw_version < 673)
				has_compute_vm_bug = true;
	}

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
			/* only compute rings */
			ring->has_compute_vm_bug = has_compute_vm_bug;
		else
			ring->has_compute_vm_bug = false;
	}
}
/**
 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
 *
 * @ring: ring on which the job will be submitted
 * @job: job to submit
 *
 * Returns:
 * True if sync is needed.
 */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id;
	bool gds_switch_needed;
	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;

	if (job->vmid == 0)
		return false;
	id = &id_mgr->ids[job->vmid];
	gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);

	if (amdgpu_vmid_had_gpu_reset(adev, id))
		return true;

	return vm_flush_needed || gds_switch_needed;
}
/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: related job
 * @need_pipe_sync: is pipe sync needed
 *
 * Emit a VM flush when it is necessary.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
	bool vm_flush_needed = job->vm_needs_flush;
	bool pasid_mapping_needed = id->pasid != job->pasid ||
		!id->pasid_mapping ||
		!dma_fence_is_signaled(id->pasid_mapping);
	struct dma_fence *fence = NULL;
	unsigned patch_offset = 0;
	int r;

	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
		gds_switch_needed = true;
		vm_flush_needed = true;
		pasid_mapping_needed = true;
	}

	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
		ring->funcs->emit_wreg;

	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
		return 0;

	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (need_pipe_sync)
		amdgpu_ring_emit_pipeline_sync(ring);

	if (vm_flush_needed) {
		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
	}

	if (pasid_mapping_needed)
		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

	if (vm_flush_needed || pasid_mapping_needed) {
		r = amdgpu_fence_emit(ring, &fence, 0);
		if (r)
			return r;
	}

	if (vm_flush_needed) {
		mutex_lock(&id_mgr->lock);
		dma_fence_put(id->last_flush);
		id->last_flush = dma_fence_get(fence);
		id->current_gpu_reset_count =
			atomic_read(&adev->gpu_reset_counter);
		mutex_unlock(&id_mgr->lock);
	}

	if (pasid_mapping_needed) {
		id->pasid = job->pasid;
		dma_fence_put(id->pasid_mapping);
		id->pasid_mapping = dma_fence_get(fence);
	}
	dma_fence_put(fence);

	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
	if (ring->funcs->emit_switch_buffer) {
		amdgpu_ring_emit_switch_buffer(ring);
		amdgpu_ring_emit_switch_buffer(ring);
	}
	return 0;
}
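
/*
 * Note the emission order above: an optional COND_EXEC prologue, then
 * pipeline sync, VM flush and PASID mapping, then a fence so the new
 * translation can be tracked by the VMID manager, and finally the GDS
 * switch and the SWITCH_BUFFER packets.
 */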
/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bos vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 *
 * Returns:
 * Found bo_va or NULL.
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_vm_bo_base *base;

	for (base = bo->vm_bo; base; base = base->next) {
		if (base->vm != vm)
			continue;

		return container_of(base, struct amdgpu_bo_va, base);
	}
	return NULL;
}
/**
 * amdgpu_vm_do_set_ptes - helper to call the right asic function
 *
 * @params: see amdgpu_pte_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
				  struct amdgpu_bo *bo,
				  uint64_t pe, uint64_t addr,
				  unsigned count, uint32_t incr,
				  uint64_t flags)
{
	pe += amdgpu_bo_gpu_offset(bo);
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

	if (count < 3) {
		amdgpu_vm_write_pte(params->adev, params->ib, pe,
				    addr | flags, count, incr);

	} else {
		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
				      count, incr, flags);
	}
}
/**
 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
 *
 * @params: see amdgpu_pte_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
				   struct amdgpu_bo *bo,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
{
	uint64_t src = (params->src + (addr >> 12) * 8);

	pe += amdgpu_bo_gpu_offset(bo);
	trace_amdgpu_vm_copy_ptes(pe, src, count);

	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
}
/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to.
 *
 * Returns:
 * The pointer for the page table entry.
 */
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size*/
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
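
/*
 * Worked example (illustrative, 64K CPU pages / 4K GPU pages): for
 * addr == 0x1f000 the lookup uses pages_addr[1] (addr >> 16), the OR adds
 * the 0xf000 offset inside the CPU page, and the final mask keeps the
 * result 4K-aligned since GPU PTEs are written with 4K granularity.
 */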
/**
 * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
 *
 * @params: see amdgpu_pte_update_params definition
 * @bo: PD/PT to update
 * @pe: kmap addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Write count number of PT/PD entries directly.
 */
static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
				   struct amdgpu_bo *bo,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
{
	unsigned int i;
	uint64_t value;

	pe += (unsigned long)amdgpu_bo_kptr(bo);

	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

	for (i = 0; i < count; i++) {
		value = params->pages_addr ?
			amdgpu_vm_map_gart(params->pages_addr, addr) :
			addr;
		amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
				       i, value, flags);
		addr += incr;
	}
}
/**
 * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
 *
 * @adev: amdgpu_device pointer
 * @vm: related vm
 * @owner: fence owner
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     void *owner)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
	r = amdgpu_sync_wait(&sync, true);
	amdgpu_sync_free(&sync);

	return r;
}
/**
 * amdgpu_vm_update_func - helper to call update function
 *
 * Calls the update function for both the given BO as well as its shadow.
 */
static void amdgpu_vm_update_func(struct amdgpu_pte_update_params *params,
				  struct amdgpu_bo *bo,
				  uint64_t pe, uint64_t addr,
				  unsigned count, uint32_t incr,
				  uint64_t flags)
{
	if (bo->shadow)
		params->func(params, bo->shadow, pe, addr, count, incr, flags);
	params->func(params, bo, pe, addr, count, incr, flags);
}
/*
 * amdgpu_vm_update_pde - update a single level in the hierarchy
 *
 * @params: parameters for the update
 * @vm: requested vm
 * @parent: parent directory
 * @entry: entry to update
 *
 * Makes sure the requested entry in parent is up to date.
 */
static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
				 struct amdgpu_vm *vm,
				 struct amdgpu_vm_pt *parent,
				 struct amdgpu_vm_pt *entry)
{
	struct amdgpu_bo *bo = parent->base.bo, *pbo;
	uint64_t pde, pt, flags;
	unsigned level;

	/* Don't update huge pages here */
	if (entry->huge)
		return;

	for (level = 0, pbo = bo->parent; pbo; ++level)
		pbo = pbo->parent;

	level += params->adev->vm_manager.root_level;
	amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
	pde = (entry - parent->entries) * 8;
	amdgpu_vm_update_func(params, bo, pde, pt, 1, 0, flags);
}
/*
 * amdgpu_vm_invalidate_pds - mark all PDs as invalid
 *
 * @adev: amdgpu_device pointer
 * @vm: related vm
 *
 * Mark all PD level as invalid after an error.
 */
static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
				     struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_pt *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry)
		if (entry->base.bo && !entry->base.moved)
			amdgpu_vm_bo_relocated(&entry->base);
}
/*
 * amdgpu_vm_update_directories - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Makes sure all directories are up to date.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm)
{
	struct amdgpu_pte_update_params params;
	struct amdgpu_job *job;
	unsigned ndw = 0;
	int r = 0;

	if (list_empty(&vm->relocated))
		return 0;

restart:
	memset(&params, 0, sizeof(params));
	params.adev = adev;

	if (vm->use_cpu_for_update) {
		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
		if (unlikely(r))
			return r;

		params.func = amdgpu_vm_cpu_set_ptes;
	} else {
		ndw = 512 * 8;
		r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
		if (r)
			return r;

		params.ib = &job->ibs[0];
		params.func = amdgpu_vm_do_set_ptes;
	}

	while (!list_empty(&vm->relocated)) {
		struct amdgpu_vm_pt *pt, *entry;

		entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
					 base.vm_status);
		amdgpu_vm_bo_idle(&entry->base);

		pt = amdgpu_vm_pt_parent(entry);
		if (!pt)
			continue;

		amdgpu_vm_update_pde(&params, vm, pt, entry);

		if (!vm->use_cpu_for_update &&
		    (ndw - params.ib->length_dw) < 32)
			break;
	}

	if (vm->use_cpu_for_update) {
		/* Flush HDP */
		mb();
		amdgpu_asic_flush_hdp(adev, NULL);
	} else if (params.ib->length_dw == 0) {
		amdgpu_job_free(job);
	} else {
		struct amdgpu_bo *root = vm->root.base.bo;
		struct amdgpu_ring *ring;
		struct dma_fence *fence;

		ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
				    sched);

		amdgpu_ring_pad_ib(ring, params.ib);
		amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM, false);
		WARN_ON(params.ib->length_dw > ndw);
		r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
				      &fence);
		if (r)
			goto error;

		amdgpu_bo_fence(root, fence, true);
		dma_fence_put(vm->last_update);
		vm->last_update = fence;
	}

	if (!list_empty(&vm->relocated))
		goto restart;

	return 0;

error:
	amdgpu_vm_invalidate_pds(adev, vm);
	amdgpu_job_free(job);
	return r;
}
/**
 * amdgpu_vm_update_huge - figure out parameters for PTE updates
 *
 * Make sure to set the right flags for the PTEs at the desired level.
 */
static void amdgpu_vm_update_huge(struct amdgpu_pte_update_params *params,
				  struct amdgpu_bo *bo, unsigned level,
				  uint64_t pe, uint64_t addr,
				  unsigned count, uint32_t incr,
				  uint64_t flags)

{
	if (level != AMDGPU_VM_PTB) {
		flags |= AMDGPU_PDE_PTE;
		amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);
	}

	amdgpu_vm_update_func(params, bo, pe, addr, count, incr, flags);
}
/**
 * amdgpu_vm_fragment - get fragment for PTEs
 *
 * @params: see amdgpu_pte_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @flags: hw mapping flags
 * @frag: resulting fragment size
 * @frag_end: end of this fragment
 *
 * Returns the first possible fragment for the start and end address.
 */
static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
			       uint64_t start, uint64_t end, uint64_t flags,
			       unsigned int *frag, uint64_t *frag_end)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 *
	 * Starting with Vega10 the fragment size only controls the L1. The L2
	 * is now directly fed with small/huge/giant pages from the walker.
	 */
	unsigned max_frag;

	if (params->adev->asic_type < CHIP_VEGA10)
		max_frag = params->adev->vm_manager.fragment_size;
	else
		max_frag = 31;

	/* system pages are not physically continuous */
	if (params->src) {
		*frag = 0;
		*frag_end = end;
		return;
	}

	/* This intentionally wraps around if no bit is set */
	*frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
	if (*frag >= max_frag) {
		*frag = max_frag;
		*frag_end = end & ~((1ULL << max_frag) - 1);
	} else {
		*frag_end = start + (1 << *frag);
	}
}
/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @params: see amdgpu_pte_update_params definition
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
				 uint64_t start, uint64_t end,
				 uint64_t dst, uint64_t flags)
{
	struct amdgpu_device *adev = params->adev;
	struct amdgpu_vm_pt_cursor cursor;
	uint64_t frag_start = start, frag_end;
	unsigned int frag;

	/* figure out the initial fragment */
	amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);

	/* walk over the address space and update the PTs */
	amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
	while (cursor.pfn < end) {
		struct amdgpu_bo *pt = cursor.entry->base.bo;
		unsigned shift, parent_shift, mask;
		uint64_t incr, entry_end, pe_start;

		if (!pt)
			return -ENOENT;

		/* The root level can't be a huge page */
		if (cursor.level == adev->vm_manager.root_level) {
			if (!amdgpu_vm_pt_descendant(adev, &cursor))
				return -ENOENT;
			continue;
		}

		/* If it isn't already handled it can't be a huge page */
		if (cursor.entry->huge) {
			/* Add the entry to the relocated list to update it. */
			cursor.entry->huge = false;
			amdgpu_vm_bo_relocated(&cursor.entry->base);
		}

		shift = amdgpu_vm_level_shift(adev, cursor.level);
		parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
		if (adev->asic_type < CHIP_VEGA10) {
			/* No huge page support before GMC v9 */
			if (cursor.level != AMDGPU_VM_PTB) {
				if (!amdgpu_vm_pt_descendant(adev, &cursor))
					return -ENOENT;
				continue;
			}
		} else if (frag < shift) {
			/* We can't use this level when the fragment size is
			 * smaller than the address shift. Go to the next
			 * child entry and try again.
			 */
			if (!amdgpu_vm_pt_descendant(adev, &cursor))
				return -ENOENT;
			continue;
		} else if (frag >= parent_shift) {
			/* If the fragment size is even larger than the parent
			 * shift we should go up one level and check it again.
			 */
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -ENOENT;
			continue;
		}

		/* Looks good so far, calculate parameters for the update */
		incr = AMDGPU_GPU_PAGE_SIZE << shift;
		mask = amdgpu_vm_entries_mask(adev, cursor.level);
		pe_start = ((cursor.pfn >> shift) & mask) * 8;
		entry_end = (mask + 1) << shift;
		entry_end += cursor.pfn & ~(entry_end - 1);
		entry_end = min(entry_end, end);

		do {
			uint64_t upd_end = min(entry_end, frag_end);
			unsigned nptes = (upd_end - frag_start) >> shift;

			amdgpu_vm_update_huge(params, pt, cursor.level,
					      pe_start, dst, nptes, incr,
					      flags | AMDGPU_PTE_FRAG(frag));

			pe_start += nptes * 8;
			dst += nptes * AMDGPU_GPU_PAGE_SIZE << shift;

			frag_start = upd_end;
			if (frag_start >= frag_end) {
				/* figure out the next fragment */
				amdgpu_vm_fragment(params, frag_start, end,
						   flags, &frag, &frag_end);
				if (frag < shift)
					break;
			}
		} while (frag_start < entry_end);

		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
			/* Mark all child entries as huge */
			while (cursor.pfn < frag_start) {
				cursor.entry->huge = true;
				amdgpu_vm_pt_next(adev, &cursor);
			}

		} else if (frag >= shift) {
			/* or just move on to the next on the same level. */
			amdgpu_vm_pt_next(adev, &cursor);
		}
	}

	return 0;
}
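
/*
 * Net effect of the walk above: each fragment is written at the deepest
 * level whose address shift it still covers; when a whole PD entry is
 * written directly (frag >= shift) the child entries below it are flagged
 * as huge so later updates know to rebuild them.
 */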
/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct dma_fence *exclusive,
				       dma_addr_t *pages_addr,
				       struct amdgpu_vm *vm,
				       uint64_t start, uint64_t last,
				       uint64_t flags, uint64_t addr,
				       struct dma_fence **fence)
{
	struct amdgpu_ring *ring;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned nptes, ncmds, ndw;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	struct dma_fence *f = NULL;
	int r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;

	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

	if (vm->use_cpu_for_update) {
		/* params.src is used as flag to indicate system Memory */
		if (pages_addr)
			params.src = ~0;

		/* Wait for PT BOs to be free. PTs share the same resv. object
		 * as the root PD BO
		 */
		r = amdgpu_vm_wait_pd(adev, vm, owner);
		if (unlikely(r))
			return r;

		params.func = amdgpu_vm_cpu_set_ptes;
		params.pages_addr = pages_addr;
		return amdgpu_vm_update_ptes(&params, start, last + 1,
					     addr, flags);
	}

	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);

	nptes = last - start + 1;

	/*
	 * reserve space for two commands every (1 << BLOCK_SIZE)
	 *  entries or 2k dwords (whatever is smaller)
	 *
	 * The second command is for the shadow pagetables.
	 */
	if (vm->root.base.bo->shadow)
		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
	else
		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);

	/* padding, etc. */
	ndw = 64;

	if (pages_addr) {
		/* copy commands needed */
		ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;

		/* and also PTEs */
		ndw += nptes * 2;

		params.func = amdgpu_vm_do_copy_ptes;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* extra commands for begin/end fragments */
		if (vm->root.base.bo->shadow)
			ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
		else
			ndw += 2 * 10 * adev->vm_manager.fragment_size;

		params.func = amdgpu_vm_do_set_ptes;
	}

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	params.ib = &job->ibs[0];

	if (pages_addr) {
		uint64_t *pte;
		unsigned i;

		/* Put the PTEs at the end of the IB. */
		i = ndw - nptes * 2;

		pte = (uint64_t *)&(job->ibs->ptr[i]);
		params.src = job->ibs->gpu_addr + i * 4;

		for (i = 0; i < nptes; ++i) {
			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
						    AMDGPU_GPU_PAGE_SIZE);
			pte[i] |= flags;
		}
		addr = 0;
	}

	r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
	if (r)
		goto error_free;

	r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
			     owner, false);
	if (r)
		goto error_free;

	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
	if (r)
		goto error_free;

	r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
	if (r)
		goto error_free;

	amdgpu_ring_pad_ib(ring, params.ib);
	WARN_ON(params.ib->length_dw > ndw);
	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->root.base.bo, f, true);
	if (fence) {
		dma_fence_put(*fence);
		*fence = f;
	} else {
		dma_fence_put(f);
	}
	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}
1872 * @adev: amdgpu_device pointer
1873 * @exclusive: fence we need to sync to
1874 * @pages_addr: DMA addresses to use for mapping
1876 * @mapping: mapped range and flags to use for the update
1877 * @flags: HW flags for the mapping
1878 * @nodes: array of drm_mm_nodes with the MC addresses
1879 * @fence: optional resulting fence
1881 * Split the mapping into smaller chunks so that each update fits
1885 * 0 for success, -EINVAL for failure.
1887 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device
*adev
,
1888 struct dma_fence
*exclusive
,
1889 dma_addr_t
*pages_addr
,
1890 struct amdgpu_vm
*vm
,
1891 struct amdgpu_bo_va_mapping
*mapping
,
1893 struct drm_mm_node
*nodes
,
1894 struct dma_fence
**fence
)
1896 unsigned min_linear_pages
= 1 << adev
->vm_manager
.fragment_size
;
1897 uint64_t pfn
, start
= mapping
->start
;
1900 /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
1901 * but in case of something, we filter the flags in first place
1903 if (!(mapping
->flags
& AMDGPU_PTE_READABLE
))
1904 flags
&= ~AMDGPU_PTE_READABLE
;
1905 if (!(mapping
->flags
& AMDGPU_PTE_WRITEABLE
))
1906 flags
&= ~AMDGPU_PTE_WRITEABLE
;
1908 flags
&= ~AMDGPU_PTE_EXECUTABLE
;
1909 flags
|= mapping
->flags
& AMDGPU_PTE_EXECUTABLE
;
1911 flags
&= ~AMDGPU_PTE_MTYPE_MASK
;
1912 flags
|= (mapping
->flags
& AMDGPU_PTE_MTYPE_MASK
);
1914 if ((mapping
->flags
& AMDGPU_PTE_PRT
) &&
1915 (adev
->asic_type
>= CHIP_VEGA10
)) {
1916 flags
|= AMDGPU_PTE_PRT
;
1917 flags
&= ~AMDGPU_PTE_VALID
;
1920 trace_amdgpu_vm_bo_update(mapping
);
1922 pfn
= mapping
->offset
>> PAGE_SHIFT
;
1924 while (pfn
>= nodes
->size
) {
1931 dma_addr_t
*dma_addr
= NULL
;
1932 uint64_t max_entries
;
1933 uint64_t addr
, last
;
1936 addr
= nodes
->start
<< PAGE_SHIFT
;
1937 max_entries
= (nodes
->size
- pfn
) *
1938 AMDGPU_GPU_PAGES_IN_CPU_PAGE
;
1941 max_entries
= S64_MAX
;
1947 max_entries
= min(max_entries
, 16ull * 1024ull);
1949 count
< max_entries
/ AMDGPU_GPU_PAGES_IN_CPU_PAGE
;
1951 uint64_t idx
= pfn
+ count
;
1953 if (pages_addr
[idx
] !=
1954 (pages_addr
[idx
- 1] + PAGE_SIZE
))
1958 if (count
< min_linear_pages
) {
1959 addr
= pfn
<< PAGE_SHIFT
;
1960 dma_addr
= pages_addr
;
1962 addr
= pages_addr
[pfn
];
1963 max_entries
= count
* AMDGPU_GPU_PAGES_IN_CPU_PAGE
;
1966 } else if (flags
& AMDGPU_PTE_VALID
) {
1967 addr
+= adev
->vm_manager
.vram_base_offset
;
1968 addr
+= pfn
<< PAGE_SHIFT
;
1971 last
= min((uint64_t)mapping
->last
, start
+ max_entries
- 1);
1972 r
= amdgpu_vm_bo_update_mapping(adev
, exclusive
, dma_addr
, vm
,
1973 start
, last
, flags
, addr
,
1978 pfn
+= (last
- start
+ 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE
;
1979 if (nodes
&& nodes
->size
== pfn
) {
1985 } while (unlikely(start
!= mapping
->last
+ 1));
/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 *
 * Fill in the page table entries for @bo_va.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear)
{
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo_va_mapping *mapping;
	dma_addr_t *pages_addr = NULL;
	struct ttm_mem_reg *mem;
	struct drm_mm_node *nodes;
	struct dma_fence *exclusive, **last_update;
	uint64_t flags;
	int r;

	if (clear || !bo) {
		mem = NULL;
		nodes = NULL;
		exclusive = NULL;
	} else {
		struct ttm_dma_tt *ttm;

		mem = &bo->tbo.mem;
		nodes = mem->mm_node;
		if (mem->mem_type == TTM_PL_TT) {
			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
			pages_addr = ttm->dma_address;
		}
		exclusive = reservation_object_get_excl(bo->tbo.resv);
	}

	if (bo)
		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
	else
		flags = 0x0;

	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
		last_update = &vm->last_update;
	else
		last_update = &bo_va->last_pt_update;

	if (!clear && bo_va->base.moved) {
		bo_va->base.moved = false;
		list_splice_init(&bo_va->valids, &bo_va->invalids);

	} else if (bo_va->cleared != clear) {
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	}

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
					       mapping, flags, nodes,
					       last_update);
		if (r)
			return r;
	}

	if (vm->use_cpu_for_update) {
		/* Flush HDP */
		mb();
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	/* If the BO is not in its preferred location add it back to
	 * the evicted list so that it gets validated again on the
	 * next command submission.
	 */
	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
			amdgpu_vm_bo_evicted(&bo_va->base);
		else
			amdgpu_vm_bo_idle(&bo_va->base);
	} else {
		amdgpu_vm_bo_done(&bo_va->base);
	}

	list_splice_init(&bo_va->invalids, &bo_va->valids);
	bo_va->cleared = clear;

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	return 0;
}
/**
 * amdgpu_vm_update_prt_state - update the global PRT state
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
	unsigned long flags;
	bool enable;

	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
	adev->gmc.gmc_funcs->set_prt(adev, enable);
	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}
/**
 * amdgpu_vm_prt_get - add a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
	if (!adev->gmc.gmc_funcs->set_prt)
		return;

	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
		amdgpu_vm_update_prt_state(adev);
}
/**
 * amdgpu_vm_prt_put - drop a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
		amdgpu_vm_update_prt_state(adev);
}
/**
 * amdgpu_vm_prt_cb - callback for updating the PRT status
 *
 * @fence: fence for the callback
 * @_cb: the callback function
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

	amdgpu_vm_prt_put(cb->adev);
	kfree(cb);
}
/**
 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
 *
 * @adev: amdgpu_device pointer
 * @fence: fence for the callback
 */
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
				 struct dma_fence *fence)
{
	struct amdgpu_prt_cb *cb;

	if (!adev->gmc.gmc_funcs->set_prt)
		return;

	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		if (fence)
			dma_fence_wait(fence, false);

		amdgpu_vm_prt_put(adev);
	} else {
		cb->adev = adev;
		if (!fence || dma_fence_add_callback(fence, &cb->cb,
						     amdgpu_vm_prt_cb))
			amdgpu_vm_prt_cb(fence, &cb->cb);
	}
}
/**
 * amdgpu_vm_free_mapping - free a mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapping to be freed
 * @fence: fence of the unmap operation
 *
 * Free a mapping and make sure we decrease the PRT usage count if applicable.
 */
static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo_va_mapping *mapping,
				   struct dma_fence *fence)
{
	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_add_prt_cb(adev, fence);
	kfree(mapping);
}
/**
 * amdgpu_vm_prt_fini - finish all prt mappings
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Register a cleanup callback to disable PRT support after VM dies.
 */
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct reservation_object *resv = vm->root.base.bo->tbo.resv;
	struct dma_fence *excl, **shared;
	unsigned i, shared_count;
	int r;

	r = reservation_object_get_fences_rcu(resv, &excl,
					      &shared_count, &shared);
	if (r) {
		/* Not enough memory to grab the fence list, as last resort
		 * block for all the fences to complete.
		 */
		reservation_object_wait_timeout_rcu(resv, true, false,
						    MAX_SCHEDULE_TIMEOUT);
		return;
	}

	/* Add a callback for each fence in the reservation object */
	amdgpu_vm_prt_get(adev);
	amdgpu_vm_add_prt_cb(adev, excl);

	for (i = 0; i < shared_count; ++i) {
		amdgpu_vm_prt_get(adev);
		amdgpu_vm_add_prt_cb(adev, shared[i]);
	}

	kfree(shared);
}
/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @fence: optional resulting fence (unchanged if no work needed to be done
 * or if an error occurred)
 *
 * Make sure all freed BOs are cleared in the PT.
 * PTs have to be reserved and mutex must be locked!
 *
 * Returns:
 * 0 for success.
 *
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence)
{
	struct amdgpu_bo_va_mapping *mapping;
	uint64_t init_pte_value = 0;
	struct dma_fence *f = NULL;
	int r;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		if (vm->pte_support_ats &&
		    mapping->start < AMDGPU_GMC_HOLE_START)
			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;

		r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
						mapping->start, mapping->last,
						init_pte_value, 0, &f);
		amdgpu_vm_free_mapping(adev, vm, mapping, f);
		if (r) {
			dma_fence_put(f);
			return r;
		}
	}

	if (fence && f) {
		dma_fence_put(*fence);
		*fence = f;
	} else {
		dma_fence_put(f);
	}

	return 0;
}
/**
 * amdgpu_vm_handle_moved - handle moved BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all BOs which are moved are updated in the PTs.
 *
 * Returns:
 * 0 for success.
 *
 * PTs have to be reserved!
 */
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va *bo_va, *tmp;
	struct reservation_object *resv;
	bool clear;
	int r;

	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
		/* Per VM BOs never need to be cleared in the page tables */
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;
	}

	spin_lock(&vm->invalidated_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
					 base.vm_status);
		resv = bo_va->base.bo->tbo.resv;
		spin_unlock(&vm->invalidated_lock);

		/* Try to reserve the BO to avoid clearing its ptes */
		if (!amdgpu_vm_debug && reservation_object_trylock(resv))
			clear = false;
		/* Somebody else is using the BO right now */
		else
			clear = true;

		r = amdgpu_vm_bo_update(adev, bo_va, clear);
		if (r)
			return r;

		if (!clear)
			reservation_object_unlock(resv);
		spin_lock(&vm->invalidated_lock);
	}
	spin_unlock(&vm->invalidated_lock);

	return 0;
}
/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm
 *
 * Returns:
 * Newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);

	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);

	return bo_va;
}
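
/*
 * Illustrative sketch (added for clarity, not part of the original source):
 * creating a bo_va for a buffer object. The BO must already be reserved by
 * the caller; "bo" here is a hypothetical, reserved amdgpu_bo.
 *
 *	struct amdgpu_bo_va *bo_va;
 *
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	if (!bo_va)
 *		return -ENOMEM;
 */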
/**
 * amdgpu_vm_bo_insert_map - insert a new mapping
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @mapping: the mapping to insert
 *
 * Insert a new mapping into all structures.
 */
static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va,
				    struct amdgpu_bo_va_mapping *mapping)
{
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo *bo = bo_va->base.bo;

	mapping->bo_va = bo_va;
	list_add(&mapping->list, &bo_va->invalids);
	amdgpu_vm_it_insert(mapping, &vm->va);

	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_prt_get(adev);

	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
	    !bo_va->base.moved) {
		list_move(&bo_va->base.vm_status, &vm->moved);
	}
	trace_amdgpu_vm_bo_map(bo_va, mapping);
}
/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fits at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo && offset + size > amdgpu_bo_size(bo)))
		return -EINVAL;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	if (tmp) {
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
			tmp->start, tmp->last + 1);
		return -EINVAL;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}
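
/*
 * Illustrative sketch (added for clarity, not part of the original source):
 * mapping a whole BO read/write at a hypothetical, GPU-page-aligned address
 * 0x400000. saddr, offset and size must all be multiples of
 * AMDGPU_GPU_PAGE_SIZE, and the range must not overlap an existing mapping,
 * otherwise -EINVAL is returned.
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x400000, 0,
 *			     amdgpu_bo_size(bo_va->base.bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	if (r)
 *		return r;
 */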
/**
 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
 * mappings as we do so.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t saddr, uint64_t offset,
			     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo = bo_va->base.bo;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fits at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo && offset + size > amdgpu_bo_size(bo)))
		return -EINVAL;

	/* Allocate all the needed memory */
	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
	if (r) {
		kfree(mapping);
		return r;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}
/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->base.vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	amdgpu_vm_it_remove(mapping, &vm->va);
	mapping->bo_va = NULL;
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);

	return 0;
}
/**
 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
 *
 * @adev: amdgpu_device pointer
 * @vm: VM structure to use
 * @saddr: start of the range
 * @size: size of the range
 *
 * Remove all mappings in a range, split them as appropriate.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size)
{
	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
	LIST_HEAD(removed);
	uint64_t eaddr;

	eaddr = saddr + size - 1;
	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	/* Allocate all the needed memory */
	before = kzalloc(sizeof(*before), GFP_KERNEL);
	if (!before)
		return -ENOMEM;
	INIT_LIST_HEAD(&before->list);

	after = kzalloc(sizeof(*after), GFP_KERNEL);
	if (!after) {
		kfree(before);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&after->list);

	/* Now gather all removed mappings */
	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	while (tmp) {
		/* Remember mapping split at the start */
		if (tmp->start < saddr) {
			before->start = tmp->start;
			before->last = saddr - 1;
			before->offset = tmp->offset;
			before->flags = tmp->flags;
			before->bo_va = tmp->bo_va;
			list_add(&before->list, &tmp->bo_va->invalids);
		}

		/* Remember mapping split at the end */
		if (tmp->last > eaddr) {
			after->start = eaddr + 1;
			after->last = tmp->last;
			after->offset = tmp->offset;
			after->offset += after->start - tmp->start;
			after->flags = tmp->flags;
			after->bo_va = tmp->bo_va;
			list_add(&after->list, &tmp->bo_va->invalids);
		}

		list_del(&tmp->list);
		list_add(&tmp->list, &removed);

		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
	}

	/* And free them up */
	list_for_each_entry_safe(tmp, next, &removed, list) {
		amdgpu_vm_it_remove(tmp, &vm->va);
		list_del(&tmp->list);

		if (tmp->start < saddr)
			tmp->start = saddr;
		if (tmp->last > eaddr)
			tmp->last = eaddr;

		tmp->bo_va = NULL;
		list_add(&tmp->list, &vm->freed);
		trace_amdgpu_vm_bo_unmap(NULL, tmp);
	}

	/* Insert partial mapping before the range */
	if (!list_empty(&before->list)) {
		amdgpu_vm_it_insert(before, &vm->va);
		if (before->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(before);
	}

	/* Insert partial mapping after the range */
	if (!list_empty(&after->list)) {
		amdgpu_vm_it_insert(after, &vm->va);
		if (after->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(after);
	}

	return 0;
}
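
/*
 * Worked example (added for illustration): with a single mapping covering
 * GPU pages [0x0000, 0xffff], clearing the byte range that corresponds to
 * pages [0x4000, 0x7fff] leaves a "before" remainder [0x0000, 0x3fff] and
 * an "after" remainder [0x8000, 0xffff], both re-inserted into the interval
 * tree; the clipped middle piece is moved to &vm->freed so that
 * amdgpu_vm_clear_freed() can clear its PTEs later.
 */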
/**
 * amdgpu_vm_bo_lookup_mapping - find mapping by address
 *
 * @vm: the requested VM
 * @addr: the address
 *
 * Find a mapping by its address.
 *
 * Returns:
 * The amdgpu_bo_va_mapping matching for addr or NULL
 */
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr)
{
	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
}
/**
 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
 *
 * @vm: the requested vm
 * @ticket: CS ticket
 *
 * Trace all mappings of BOs reserved during a command submission.
 */
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
{
	struct amdgpu_bo_va_mapping *mapping;

	if (!trace_amdgpu_vm_bo_cs_enabled())
		return;

	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
		if (mapping->bo_va && mapping->bo_va->base.bo) {
			struct amdgpu_bo *bo;

			bo = mapping->bo_va->base.bo;
			if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
				continue;
		}

		trace_amdgpu_vm_bo_cs(mapping);
	}
}
/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_vm_bo_base **base;

	if (bo) {
		if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
			vm->bulk_moveable = false;

		for (base = &bo_va->base.bo->vm_bo; *base;
		     base = &(*base)->next) {
			if (*base != &bo_va->base)
				continue;

			*base = bo_va->base.next;
			break;
		}
	}

	spin_lock(&vm->invalidated_lock);
	list_del(&bo_va->base.vm_status);
	spin_unlock(&vm->invalidated_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		mapping->bo_va = NULL;
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);
	}

	dma_fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}
/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 * @evicted: is the BO evicted
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted)
{
	struct amdgpu_vm_bo_base *bo_base;

	/* shadow bo doesn't have bo base, its validation needs its parent */
	if (bo->parent && bo->parent->shadow == bo)
		bo = bo->parent;

	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;

		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
			amdgpu_vm_bo_evicted(bo_base);
			continue;
		}

		if (bo_base->moved)
			continue;
		bo_base->moved = true;

		if (bo->tbo.type == ttm_bo_type_kernel)
			amdgpu_vm_bo_relocated(bo_base);
		else if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
			amdgpu_vm_bo_moved(bo_base);
		else
			amdgpu_vm_bo_invalidated(bo_base);
	}
}
/**
 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
 *
 * @vm_size: VM size
 *
 * Returns:
 * VM page table as power of two
 */
static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
	/* Total bits covered by PD + PTs */
	unsigned bits = ilog2(vm_size) + 18;

	/* Make sure the PD is 4K in size up to 8GB address space.
	   Above that split equal between PD and PTs */
	if (vm_size <= 8)
		return (bits - 9);
	else
		return ((bits + 3) / 2);
}
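
/*
 * Worked example (added for illustration): for vm_size = 64 GB,
 * bits = ilog2(64) + 18 = 24, so the block size is (24 + 3) / 2 = 13 and
 * the remaining 24 - 13 = 11 bits stay in the page directory. For
 * vm_size <= 8 GB the function returns bits - 9, leaving 9 bits
 * (512 entries, one 4K page) for the PD.
 */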
/**
 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
 *
 * @adev: amdgpu_device pointer
 * @min_vm_size: the minimum vm size in GB if it's set auto
 * @fragment_size_default: Default PTE fragment size
 * @max_level: max VMPT level
 * @max_bits: max address space size in bits
 */
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits)
{
	unsigned int max_size = 1 << (max_bits - 30);
	unsigned int vm_size;
	uint64_t tmp;

	/* adjust vm size first */
	if (amdgpu_vm_size != -1) {
		vm_size = amdgpu_vm_size;
		if (vm_size > max_size) {
			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
				 amdgpu_vm_size, max_size);
			vm_size = max_size;
		}
	} else {
		struct sysinfo si;
		unsigned int phys_ram_gb;

		/* Optimal VM size depends on the amount of physical
		 * RAM available. Underlying requirements and
		 * assumptions:
		 *
		 *  - Need to map system memory and VRAM from all GPUs
		 *     - VRAM from other GPUs not known here
		 *     - Assume VRAM <= system memory
		 *  - On GFX8 and older, VM space can be segmented for
		 *    different MTYPEs
		 *  - Need to allow room for fragmentation, guard pages etc.
		 *
		 * This adds up to a rough guess of system memory x3.
		 * Round up to power of two to maximize the available
		 * VM size with the given page table size.
		 */
		si_meminfo(&si);
		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
			       (1 << 30) - 1) >> 30;
		vm_size = roundup_pow_of_two(
			min(max(phys_ram_gb * 3, min_vm_size), max_size));
	}

	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;

	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
	if (amdgpu_vm_block_size != -1)
		tmp >>= amdgpu_vm_block_size - 9;
	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
	switch (adev->vm_manager.num_level) {
	case 3:
		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
		break;
	case 2:
		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
		break;
	case 1:
		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
		break;
	default:
		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
	}
	/* block size depends on vm size and hw setup*/
	if (amdgpu_vm_block_size != -1)
		adev->vm_manager.block_size =
			min((unsigned)amdgpu_vm_block_size, max_bits
			    - AMDGPU_GPU_PAGE_SHIFT
			    - 9 * adev->vm_manager.num_level);
	else if (adev->vm_manager.num_level > 1)
		adev->vm_manager.block_size = 9;
	else
		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);

	if (amdgpu_vm_fragment_size == -1)
		adev->vm_manager.fragment_size = fragment_size_default;
	else
		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;

	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
		 vm_size, adev->vm_manager.num_level + 1,
		 adev->vm_manager.block_size,
		 adev->vm_manager.fragment_size);
}
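
/*
 * Worked example (added for illustration): on a machine with 12 GB of
 * system memory and min_vm_size = 8, the auto path above computes
 * phys_ram_gb = 12 and vm_size = roundup_pow_of_two(min(max(12 * 3, 8),
 * max_size)) = 64 GB (assuming max_size >= 64), so max_pfn becomes
 * 64 << 18 GPU pages.
 */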
static struct amdgpu_retryfault_hashtable *init_fault_hash(void)
{
	struct amdgpu_retryfault_hashtable *fault_hash;

	fault_hash = kmalloc(sizeof(*fault_hash), GFP_KERNEL);
	if (!fault_hash)
		return NULL;

	INIT_CHASH_TABLE(fault_hash->hash,
			 AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
	spin_lock_init(&fault_hash->lock);
	fault_hash->count = 0;

	return fault_hash;
}
/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @vm_context: Indicates if it is a GFX or Compute context
 * @pasid: Process address space identifier
 *
 * Init @vm fields.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid)
{
	struct amdgpu_bo_param bp;
	struct amdgpu_bo *root;
	int r, i;

	vm->va = RB_ROOT_CACHED;
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		vm->reserved_vmid[i] = NULL;
	INIT_LIST_HEAD(&vm->evicted);
	INIT_LIST_HEAD(&vm->relocated);
	INIT_LIST_HEAD(&vm->moved);
	INIT_LIST_HEAD(&vm->idle);
	INIT_LIST_HEAD(&vm->invalidated);
	spin_lock_init(&vm->invalidated_lock);
	INIT_LIST_HEAD(&vm->freed);

	/* create scheduler entity for page table updates */
	r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
				  adev->vm_manager.vm_pte_num_rqs, NULL);
	if (r)
		return r;

	vm->pte_support_ats = false;

	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
					    AMDGPU_VM_USE_CPU_FOR_COMPUTE);

		if (adev->asic_type == CHIP_RAVEN)
			vm->pte_support_ats = true;
	} else {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
					    AMDGPU_VM_USE_CPU_FOR_GFX);
	}
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update &&
		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
		  "CPU update of VM recommended only for large BAR system\n");
	vm->last_update = NULL;

	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
		bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_create(adev, &bp, &root);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(root, true);
	if (r)
		goto error_free_root;

	r = amdgpu_vm_clear_bo(adev, vm, root,
			       adev->vm_manager.root_level,
			       vm->pte_support_ats);
	if (r)
		goto error_unreserve;

	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
	amdgpu_bo_unreserve(vm->root.base.bo);

	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
			      GFP_ATOMIC);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
		if (r < 0)
			goto error_free_root;

		vm->pasid = pasid;
	}

	vm->fault_hash = init_fault_hash();
	if (!vm->fault_hash) {
		r = -ENOMEM;
		goto error_free_root;
	}

	INIT_KFIFO(vm->faults);
	vm->fault_credit = 16;

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(vm->root.base.bo);

error_free_root:
	amdgpu_bo_unref(&vm->root.base.bo->shadow);
	amdgpu_bo_unref(&vm->root.base.bo);
	vm->root.base.bo = NULL;

error_free_sched_entity:
	drm_sched_entity_destroy(&vm->entity);

	return r;
}
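
/*
 * Illustrative sketch (added for clarity, not part of the original source):
 * how a per-file GFX VM could be set up; "fpriv" is a hypothetical
 * amdgpu_fpriv and the pasid is assumed to have been allocated beforehand.
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
 *	if (r)
 *		return r;
 */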
/**
 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @pasid: pasid to use
 *
 * This only works on GFX VMs that don't have any BOs added and no
 * page tables allocated yet.
 *
 * Changes the following VM parameters:
 * - use_cpu_for_update
 * - pte_supports_ats
 * - pasid (old PASID is released, because compute manages its own PASIDs)
 *
 * Reinitializes the page directory to reflect the changed ATS
 * setting.
 *
 * Returns:
 * 0 for success, -errno for errors.
 */
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
{
	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
	int r;

	r = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (r)
		return r;

	/* Sanity checks */
	if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
		r = -EINVAL;
		goto unreserve_bo;
	}

	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
			      GFP_ATOMIC);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);

		if (r == -ENOSPC)
			goto unreserve_bo;
		r = 0;
	}

	/* Check if PD needs to be reinitialized and do it before
	 * changing any other state, in case it fails.
	 */
	if (pte_support_ats != vm->pte_support_ats) {
		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
				       adev->vm_manager.root_level,
				       pte_support_ats);
		if (r)
			goto free_idr;
	}

	/* Update VM state */
	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
	vm->pte_support_ats = pte_support_ats;
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update &&
		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
		  "CPU update of VM recommended only for large BAR system\n");

	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);

		/* Free the original amdgpu allocated pasid
		 * Will be replaced with kfd allocated pasid
		 */
		amdgpu_pasid_free(vm->pasid);
		vm->pasid = 0;
	}

	/* Free the shadow bo for compute VM */
	amdgpu_bo_unref(&vm->root.base.bo->shadow);

	if (pasid)
		vm->pasid = pasid;

	goto unreserve_bo;

free_idr:
	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}
unreserve_bo:
	amdgpu_bo_unreserve(vm->root.base.bo);
	return r;
}
/**
 * amdgpu_vm_release_compute - release a compute vm
 * @adev: amdgpu_device pointer
 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
 *
 * This is the counterpart of amdgpu_vm_make_compute. It decouples the compute
 * pasid from the vm. Compute should stop using the vm after this call.
 */
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}
	vm->pasid = 0;
}
/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Unbind the VM and remove all bos from the vm bo list
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
	struct amdgpu_bo *root;
	u64 fault;
	int i, r;

	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);

	/* Clear pending page faults from IH when the VM is destroyed */
	while (kfifo_get(&vm->faults, &fault))
		amdgpu_vm_clear_fault(vm->fault_hash, fault);

	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}

	kfree(vm->fault_hash);
	vm->fault_hash = NULL;

	drm_sched_entity_destroy(&vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp,
					     &vm->va.rb_root, rb) {
		/* Don't remove the mapping here, we don't want to trigger a
		 * rebalance and the tree is about to be destroyed anyway.
		 */
		list_del(&mapping->list);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	root = amdgpu_bo_ref(vm->root.base.bo);
	r = amdgpu_bo_reserve(root, true);
	if (r) {
		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
	} else {
		amdgpu_vm_free_pts(adev, vm);
		amdgpu_bo_unreserve(root);
	}
	amdgpu_bo_unref(&root);
	dma_fence_put(vm->last_update);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		amdgpu_vmid_free_reserved(adev, vm, i);
}
/**
 * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
 *
 * @adev: amdgpu_device pointer
 * @pasid: PASID to identify the VM
 *
 * This function is expected to be called in interrupt context.
 *
 * Returns:
 * True if there was fault credit, false otherwise
 */
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
				  unsigned int pasid)
{
	struct amdgpu_vm *vm;

	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (!vm) {
		/* VM not found, can't track fault credit */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return true;
	}

	/* No lock needed. only accessed by IRQ handler */
	if (!vm->fault_credit) {
		/* Too many faults in this VM */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return false;
	}

	vm->fault_credit--;
	spin_unlock(&adev->vm_manager.pasid_lock);
	return true;
}
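
/*
 * Illustrative sketch (added for clarity, not part of the original source):
 * an interrupt handler could use the fault credit to throttle fault storms
 * from a single process. "pasid" here stands for the PASID decoded from a
 * hypothetical IH ring entry; the fault is dropped once credit runs out:
 *
 *	if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
 *		return;
 */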
/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	amdgpu_vmid_mgr_init(adev);

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);

	/* If not overridden by the user, by default, only in large BAR systems
	 * Compute VM tables will be updated by CPU
	 */
#ifdef CONFIG_X86_64
	if (amdgpu_vm_update_mode == -1) {
		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
			adev->vm_manager.vm_update_mode =
				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
		else
			adev->vm_manager.vm_update_mode = 0;
	} else
		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
	adev->vm_manager.vm_update_mode = 0;
#endif

	idr_init(&adev->vm_manager.pasid_idr);
	spin_lock_init(&adev->vm_manager.pasid_lock);
}
/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
	idr_destroy(&adev->vm_manager.pasid_idr);

	amdgpu_vmid_mgr_fini(adev);
}
/**
 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
 *
 * @dev: drm device pointer
 * @data: drm_amdgpu_vm
 * @filp: drm file pointer
 *
 * Returns:
 * 0 for success, -errno for errors.
 */
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	int r;

	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* currently, we only have a requirement to reserve vmid from gfxhub */
		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
		if (r)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
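
/*
 * Illustrative sketch (added for clarity, not part of the original source):
 * the userspace side of this ioctl as it might look with direct ioctl()
 * use; "drm_fd" is a hypothetical open render-node file descriptor.
 *
 *	union drm_amdgpu_vm args = {};
 *
 *	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
 *	if (drmIoctl(drm_fd, DRM_IOCTL_AMDGPU_VM, &args))
 *		return -errno;
 */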
/**
 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
 *
 * @adev: drm device pointer
 * @pasid: PASID identifier for VM
 * @task_info: task_info to fill.
 */
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
			     struct amdgpu_task_info *task_info)
{
	struct amdgpu_vm *vm;

	spin_lock(&adev->vm_manager.pasid_lock);

	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (vm)
		*task_info = vm->task_info;

	spin_unlock(&adev->vm_manager.pasid_lock);
}
/**
 * amdgpu_vm_set_task_info - Sets VMs task info.
 *
 * @vm: vm for which to set the info
 */
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
{
	if (!vm->task_info.pid) {
		vm->task_info.pid = current->pid;
		get_task_comm(vm->task_info.task_name, current);

		if (current->group_leader->mm == current->mm) {
			vm->task_info.tgid = current->group_leader->pid;
			get_task_comm(vm->task_info.process_name,
				      current->group_leader);
		}
	}
}
/**
 * amdgpu_vm_add_fault - Add a page fault record to fault hash table
 *
 * @fault_hash: fault hash table
 * @key: 64-bit encoding of PASID and address
 *
 * This should be called when a retry page fault interrupt is
 * received. If this is a new page fault, it will be added to a hash
 * table. The return value indicates whether this is a new fault, or
 * a fault that was already known and is already being handled.
 *
 * If there are too many pending page faults, this will fail. Retry
 * interrupts should be ignored in this case until there is enough
 * free space.
 *
 * Returns 0 if the fault was added, 1 if the fault was already known,
 * -ENOSPC if there are too many pending faults.
 */
int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
{
	unsigned long flags;
	int r = -ENOSPC;

	if (WARN_ON_ONCE(!fault_hash))
		/* Should be allocated in amdgpu_vm_init */
		return r;

	spin_lock_irqsave(&fault_hash->lock, flags);

	/* Only let the hash table fill up to 50% for best performance */
	if (fault_hash->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS - 1)))
		goto unlock_out;

	r = chash_table_copy_in(&fault_hash->hash, key, NULL);
	if (!r)
		fault_hash->count++;

	/* chash_table_copy_in should never fail unless we're losing count */
	WARN_ON_ONCE(r < 0);

unlock_out:
	spin_unlock_irqrestore(&fault_hash->lock, flags);
	return r;
}
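
/*
 * Illustrative sketch (added for clarity, not part of the original source):
 * how the two fault-hash helpers pair up in a retry-fault path. "key" is
 * the 64-bit PASID/address encoding described above, and the kfifo is the
 * per-VM vm->faults queue used by amdgpu_vm_fini().
 *
 *	r = amdgpu_vm_add_fault(vm->fault_hash, key);
 *	if (r == 1)
 *		return;
 *	if (!r)
 *		kfifo_put(&vm->faults, key);
 *
 * and once the fault has been serviced:
 *
 *	amdgpu_vm_clear_fault(vm->fault_hash, key);
 */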
/**
 * amdgpu_vm_clear_fault - Remove a page fault record
 *
 * @fault_hash: fault hash table
 * @key: 64-bit encoding of PASID and address
 *
 * This should be called when a page fault has been handled. Any
 * future interrupt with this key will be processed as a new
 * page fault.
 */
void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
{
	unsigned long flags;
	int r;

	if (!fault_hash)
		return;

	spin_lock_irqsave(&fault_hash->lock, flags);

	r = chash_table_remove(&fault_hash->hash, key, NULL);
	if (!WARN_ON_ONCE(r < 0)) {
		fault_hash->count--;
		WARN_ON_ONCE(fault_hash->count < 0);
	}

	spin_unlock_irqrestore(&fault_hash->lock, flags);
}