struct dma_fence **fence)
{
uint64_t init_pte_value = adev->gmc.init_pte_flags;
+ uint64_t gpu_start, gpu_end;
- pr_debug("[0x%llx 0x%llx]\n", start, last);
+ /* Convert CPU page range to GPU page range */
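+ /* Assuming the amdgpu_vm.h definition AMDGPU_GPU_PAGES_IN_CPU_PAGE =
+  * PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE: the factor is 1 on 4K-page kernels
+  * and >1 when the CPU page size is larger. start/last are inclusive,
+  * hence the (last + 1) * factor - 1 below for the inclusive GPU end.
+  */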
+ gpu_start = start * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ gpu_end = (last + 1) * AMDGPU_GPU_PAGES_IN_CPU_PAGE - 1;
- return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, start,
- last, init_pte_value, 0, 0, NULL, NULL,
+ pr_debug("CPU[0x%llx 0x%llx] -> GPU[0x%llx 0x%llx]\n", start, last,
+ gpu_start, gpu_end);
+ return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, gpu_start,
+ gpu_end, init_pte_value, 0, 0, NULL, NULL,
fence);
}
last_start, last_start + npages - 1, readonly);
for (i = offset; i < offset + npages; i++) {
+ uint64_t gpu_start, gpu_end;
+
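+ /* dma_addr[] entries carry SVM_RANGE_VRAM_DOMAIN as a flag bit on top
+  * of the address: record the domain, then mask the flag off to recover
+  * the plain DMA address.
+  */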
last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
if (readonly)
pte_flags &= ~AMDGPU_PTE_WRITEABLE;
- pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
- prange->svms, last_start, prange->start + i,
- (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
- pte_flags);
/* For dGPU mode, we use the same vm_manager to allocate VRAM for
 * different memory partitions based on fpfn/lpfn, so we should use the
 * same vm_manager.vram_base_offset regardless of the memory partition.
 */
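+ /* Convert the inclusive CPU page range [last_start, prange->start + i]
+  * to the GPU page range that amdgpu_vm_update_range() expects.
+  */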
+ gpu_start = last_start * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ gpu_end = (prange->start + i + 1) * AMDGPU_GPU_PAGES_IN_CPU_PAGE - 1;
+
+ pr_debug("svms 0x%p map CPU[0x%lx 0x%llx] GPU[0x%llx 0x%llx] vram %d PTE 0x%llx\n",
+ prange->svms, last_start, prange->start + i,
+ gpu_start, gpu_end,
+ (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
+ pte_flags);
+
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true,
- NULL, last_start, prange->start + i,
+ NULL, gpu_start, gpu_end,
pte_flags,
(last_start - prange->start) << PAGE_SHIFT,
bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,