// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
#include "include/hw_ip/mmu/mmu_general.h"

#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/genalloc.h>

#define PGS_IN_2MB_PAGE	(PAGE_SIZE_2MB >> PAGE_SHIFT)
#define HL_MMU_DEBUG	0

/*
 * The va ranges in the context object contain a list with the available chunks
 * of device virtual memory.
 * There is one range for host allocations and one for DRAM allocations.
 *
 * On initialization each range contains one chunk of all of its available
 * virtual range, which is a half of the total device virtual range.
 *
 * On each mapping of physical pages, a suitable virtual range chunk (with a
 * minimum size) is selected from the list. If the chunk size equals the
 * requested size, the chunk is returned. Otherwise, the chunk is split into
 * two chunks - one is returned as the result and the remainder stays in the
 * list.
 *
 * On each unmapping of a virtual address, the relevant virtual chunk is
 * returned to the list. If its edges match the edges of the adjacent chunks
 * (meaning a contiguous chunk can be created), the chunks are merged.
 *
 * On finish, the list is checked to contain only one chunk covering the whole
 * relevant virtual range (which is a half of the device total virtual range).
 * If not (meaning not all mappings were unmapped), a warning is printed.
 */

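/*
 * Illustrative sketch of the chunk bookkeeping described above (not part of
 * the driver; the addresses and sizes are hypothetical). The real logic lives
 * in get_va_block(), add_va_block_locked() and merge_va_blocks_locked() below.
 *
 *   free list:  [0x1000000 - 0x1ffffff]                     (one 16MB chunk)
 *   map 2MB  -> returned VA is 0x1000000, the chunk is split and the list
 *               becomes [0x1200000 - 0x1ffffff]
 *   unmap    -> [0x1000000 - 0x11fffff] is re-inserted; since its end + 1
 *               equals the start of the remaining chunk, the two are merged
 *               back into the original [0x1000000 - 0x1ffffff]
 */
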
/*
 * alloc_device_memory - allocate device memory
 *
 * @ctx : current context
 * @args : host parameters containing the requested size
 * @ret_handle : result handle
 *
 * This function does the following:
 * - Allocate the requested size rounded up to 2MB pages
 * - Return unique handle
 */
static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
				u32 *ret_handle)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u64 paddr = 0, total_size, num_pgs, i;
	u32 num_curr_pgs = 0, page_size, page_shift;
	int handle, rc;
	bool contiguous;

	page_size = hdev->asic_prop.dram_page_size;
	page_shift = __ffs(page_size);
	num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
	total_size = num_pgs << page_shift;

	contiguous = args->flags & HL_MEM_CONTIGUOUS;

	if (contiguous) {
		paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
		if (!paddr) {
			dev_err(hdev->dev,
				"failed to allocate %llu huge contiguous pages\n",
				num_pgs);
			return -ENOMEM;
		}
	}

	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
	if (!phys_pg_pack) {
		rc = -ENOMEM;
		goto pages_pack_err;
	}

	phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
	phys_pg_pack->asid = ctx->asid;
	phys_pg_pack->npages = num_pgs;
	phys_pg_pack->page_size = page_size;
	phys_pg_pack->total_size = total_size;
	phys_pg_pack->flags = args->flags;
	phys_pg_pack->contiguous = contiguous;

	phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
	if (!phys_pg_pack->pages) {
		rc = -ENOMEM;
		goto pages_arr_err;
	}

	if (phys_pg_pack->contiguous) {
		for (i = 0 ; i < num_pgs ; i++)
			phys_pg_pack->pages[i] = paddr + i * page_size;
	} else {
		for (i = 0 ; i < num_pgs ; i++) {
			phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
							vm->dram_pg_pool,
							page_size);
			if (!phys_pg_pack->pages[i]) {
				dev_err(hdev->dev,
					"ioctl failed to allocate page\n");
				rc = -ENOMEM;
				goto page_err;
			}

			num_curr_pgs++;
		}
	}

	spin_lock(&vm->idr_lock);
	handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
				GFP_ATOMIC);
	spin_unlock(&vm->idr_lock);

	if (handle < 0) {
		dev_err(hdev->dev, "Failed to get handle for page\n");
		rc = -EFAULT;
		goto idr_err;
	}

	for (i = 0 ; i < num_pgs ; i++)
		kref_get(&vm->dram_pg_pool_refcount);

	phys_pg_pack->handle = handle;

	atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
	atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);

	*ret_handle = handle;

	return 0;

idr_err:
page_err:
	if (!phys_pg_pack->contiguous)
		for (i = 0 ; i < num_curr_pgs ; i++)
			gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
					page_size);

	kvfree(phys_pg_pack->pages);
pages_arr_err:
	kfree(phys_pg_pack);
pages_pack_err:
	if (contiguous)
		gen_pool_free(vm->dram_pg_pool, paddr, total_size);

	return rc;
}

/*
 * get_userptr_from_host_va - initialize userptr structure from given host
 *                            virtual address
 *
 * @hdev : habanalabs device structure
 * @args : parameters containing the virtual address and size
 * @p_userptr : pointer to result userptr structure
 *
 * This function does the following:
 * - Allocate userptr structure
 * - Pin the given host memory using the userptr structure
 * - Perform DMA mapping to have the DMA addresses of the pages
 */
static int get_userptr_from_host_va(struct hl_device *hdev,
		struct hl_mem_in *args, struct hl_userptr **p_userptr)
{
	struct hl_userptr *userptr;
	int rc;

	userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
	if (!userptr)
		return -ENOMEM;

	rc = hl_pin_host_memory(hdev, args->map_host.host_virt_addr,
			args->map_host.mem_size, userptr);
	if (rc) {
		dev_err(hdev->dev, "Failed to pin host memory\n");
		goto pin_err;
	}

	rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
					userptr->sgt->nents, DMA_BIDIRECTIONAL);
	if (rc) {
		dev_err(hdev->dev, "failed to map sgt with DMA region\n");
		goto dma_map_err;
	}

	userptr->dma_mapped = true;
	userptr->dir = DMA_BIDIRECTIONAL;
	userptr->vm_type = VM_TYPE_USERPTR;

	*p_userptr = userptr;

	return 0;

dma_map_err:
	hl_unpin_host_memory(hdev, userptr);
pin_err:
	kfree(userptr);

	return rc;
}

/*
 * free_userptr - free userptr structure
 *
 * @hdev : habanalabs device structure
 * @userptr : userptr to free
 *
 * This function does the following:
 * - Unpins the physical pages
 * - Frees the userptr structure
 */
static void free_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
{
	hl_unpin_host_memory(hdev, userptr);
	kfree(userptr);
}

/*
 * dram_pg_pool_do_release - free DRAM pages pool
 *
 * @ref : pointer to reference object
 *
 * This function does the following:
 * - Frees the idr structure of physical pages handles
 * - Frees the generic pool of DRAM physical pages
 */
static void dram_pg_pool_do_release(struct kref *ref)
{
	struct hl_vm *vm = container_of(ref, struct hl_vm,
			dram_pg_pool_refcount);

	/*
	 * free the idr here as only here we know for sure that there are no
	 * allocated physical pages and hence there are no handles in use
	 */
	idr_destroy(&vm->phys_pg_pack_handles);
	gen_pool_destroy(vm->dram_pg_pool);
}

/*
 * free_phys_pg_pack - free physical page pack
 *
 * @hdev : habanalabs device structure
 * @phys_pg_pack : physical page pack to free
 *
 * This function does the following:
 * - For DRAM memory only, iterate over the pack and free each physical block
 *   structure by returning it to the general pool
 * - Free the hl_vm_phys_pg_pack structure
 */
static void free_phys_pg_pack(struct hl_device *hdev,
		struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_vm *vm = &hdev->vm;
	u64 i;

	if (!phys_pg_pack->created_from_userptr) {
		if (phys_pg_pack->contiguous) {
			gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
					phys_pg_pack->total_size);

			for (i = 0; i < phys_pg_pack->npages ; i++)
				kref_put(&vm->dram_pg_pool_refcount,
					dram_pg_pool_do_release);
		} else {
			for (i = 0 ; i < phys_pg_pack->npages ; i++) {
				gen_pool_free(vm->dram_pg_pool,
						phys_pg_pack->pages[i],
						phys_pg_pack->page_size);
				kref_put(&vm->dram_pg_pool_refcount,
						dram_pg_pool_do_release);
			}
		}
	}

	kvfree(phys_pg_pack->pages);
	kfree(phys_pg_pack);
}

/*
 * free_device_memory - free device memory
 *
 * @ctx : current context
 * @handle : handle of the memory chunk to free
 *
 * This function does the following:
 * - Free the device memory related to the given handle
 */
static int free_device_memory(struct hl_ctx *ctx, u32 handle)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;

	spin_lock(&vm->idr_lock);
	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
	if (phys_pg_pack) {
		if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
			dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
				handle);
			spin_unlock(&vm->idr_lock);
			return -EINVAL;
		}

		/*
		 * must remove from idr before the freeing of the physical
		 * pages as the refcount of the pool is also the trigger of the
		 * idr destroy
		 */
		idr_remove(&vm->phys_pg_pack_handles, handle);
		spin_unlock(&vm->idr_lock);

		atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
		atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);

		free_phys_pg_pack(hdev, phys_pg_pack);
	} else {
		spin_unlock(&vm->idr_lock);
		dev_err(hdev->dev,
			"free device memory failed, no match for handle %u\n",
			handle);
		return -EINVAL;
	}

	return 0;
}

/*
 * clear_va_list_locked - free virtual addresses list
 *
 * @hdev : habanalabs device structure
 * @va_list : list of virtual addresses to free
 *
 * This function does the following:
 * - Iterate over the list and free each virtual addresses block
 *
 * This function should be called only when va_list lock is taken
 */
static void clear_va_list_locked(struct hl_device *hdev,
		struct list_head *va_list)
{
	struct hl_vm_va_block *va_block, *tmp;

	list_for_each_entry_safe(va_block, tmp, va_list, node) {
		list_del(&va_block->node);
		kfree(va_block);
	}
}

/*
 * print_va_list_locked - print virtual addresses list
 *
 * @hdev : habanalabs device structure
 * @va_list : list of virtual addresses to print
 *
 * This function does the following:
 * - Iterate over the list and print each virtual addresses block
 *
 * This function should be called only when va_list lock is taken
 */
static void print_va_list_locked(struct hl_device *hdev,
		struct list_head *va_list)
{
#if HL_MMU_DEBUG
	struct hl_vm_va_block *va_block;

	dev_dbg(hdev->dev, "print va list:\n");

	list_for_each_entry(va_block, va_list, node)
		dev_dbg(hdev->dev,
			"va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
			va_block->start, va_block->end, va_block->size);
#endif
}

/*
 * merge_va_blocks_locked - merge a virtual block if possible
 *
 * @hdev : pointer to the habanalabs device structure
 * @va_list : pointer to the virtual addresses block list
 * @va_block : virtual block to merge with adjacent blocks
 *
 * This function does the following:
 * - Merge the given block with the adjacent blocks if their virtual ranges
 *   create a contiguous virtual range
 *
 * This function should be called only when va_list lock is taken
 */
static void merge_va_blocks_locked(struct hl_device *hdev,
		struct list_head *va_list, struct hl_vm_va_block *va_block)
{
	struct hl_vm_va_block *prev, *next;

	prev = list_prev_entry(va_block, node);
	if (&prev->node != va_list && prev->end + 1 == va_block->start) {
		prev->end = va_block->end;
		prev->size = prev->end - prev->start;
		list_del(&va_block->node);
		kfree(va_block);
		va_block = prev;
	}

	next = list_next_entry(va_block, node);
	if (&next->node != va_list && va_block->end + 1 == next->start) {
		next->start = va_block->start;
		next->size = next->end - next->start;
		list_del(&va_block->node);
		kfree(va_block);
	}
}

/*
 * add_va_block_locked - add a virtual block to the virtual addresses list
 *
 * @hdev : pointer to the habanalabs device structure
 * @va_list : pointer to the virtual addresses block list
 * @start : start virtual address
 * @end : end virtual address
 *
 * This function does the following:
 * - Add the given block to the virtual blocks list and merge with other
 *   blocks if a contiguous virtual block can be created
 *
 * This function should be called only when va_list lock is taken
 */
static int add_va_block_locked(struct hl_device *hdev,
		struct list_head *va_list, u64 start, u64 end)
{
	struct hl_vm_va_block *va_block, *res = NULL;
	u64 size = end - start;

	print_va_list_locked(hdev, va_list);

	list_for_each_entry(va_block, va_list, node) {
		/* TODO: remove upon matureness */
		if (hl_mem_area_crosses_range(start, size, va_block->start,
				va_block->end)) {
			dev_err(hdev->dev,
				"block crossing ranges at start 0x%llx, end 0x%llx\n",
				va_block->start, va_block->end);
			return -EINVAL;
		}

		if (va_block->end < start)
			res = va_block;
	}

	va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
	if (!va_block)
		return -ENOMEM;

	va_block->start = start;
	va_block->end = end;
	va_block->size = size;

	if (!res)
		list_add(&va_block->node, va_list);
	else
		list_add(&va_block->node, &res->node);

	merge_va_blocks_locked(hdev, va_list, va_block);

	print_va_list_locked(hdev, va_list);

	return 0;
}

/*
 * add_va_block - wrapper for add_va_block_locked
 *
 * @hdev : pointer to the habanalabs device structure
 * @va_range : pointer to the virtual addresses range
 * @start : start virtual address
 * @end : end virtual address
 *
 * This function does the following:
 * - Takes the list lock and calls add_va_block_locked
 */
static inline int add_va_block(struct hl_device *hdev,
		struct hl_va_range *va_range, u64 start, u64 end)
{
	int rc;

	mutex_lock(&va_range->lock);
	rc = add_va_block_locked(hdev, &va_range->list, start, end);
	mutex_unlock(&va_range->lock);

	return rc;
}

/*
 * get_va_block - get a virtual block with the requested size
 *
 * @hdev : pointer to the habanalabs device structure
 * @va_range : pointer to the virtual addresses range
 * @size : requested block size
 * @hint_addr : hint for request address by the user
 * @is_userptr : is host or DRAM memory
 *
 * This function does the following:
 * - Iterate on the virtual block list to find a suitable virtual block for the
 *   requested size
 * - Reserve the requested block and update the list
 * - Return the start address of the virtual block
 */
static u64 get_va_block(struct hl_device *hdev,
		struct hl_va_range *va_range, u64 size, u64 hint_addr,
		bool is_userptr)
{
	struct hl_vm_va_block *va_block, *new_va_block = NULL;
	u64 valid_start, valid_size, prev_start, prev_end, page_mask,
		res_valid_start = 0, res_valid_size = 0;
	u32 page_size;
	bool add_prev = false;

	if (is_userptr) {
		/*
		 * We cannot know if the user allocated memory with huge pages
		 * or not, hence we continue with the biggest possible
		 * granularity.
		 */
		page_size = PAGE_SIZE_2MB;
		page_mask = PAGE_MASK_2MB;
	} else {
		page_size = hdev->asic_prop.dram_page_size;
		page_mask = ~((u64)page_size - 1);
	}

	mutex_lock(&va_range->lock);

	print_va_list_locked(hdev, &va_range->list);

	list_for_each_entry(va_block, &va_range->list, node) {
		/* calc the first possible aligned addr */
		valid_start = va_block->start;

		if (valid_start & (page_size - 1)) {
			valid_start &= page_mask;
			valid_start += page_size;
			if (valid_start > va_block->end)
				continue;
		}

		valid_size = va_block->end - valid_start;

		if (valid_size >= size &&
			(!new_va_block || valid_size < res_valid_size)) {
			new_va_block = va_block;
			res_valid_start = valid_start;
			res_valid_size = valid_size;
		}

		if (hint_addr && hint_addr >= valid_start &&
				((hint_addr + size) <= va_block->end)) {
			new_va_block = va_block;
			res_valid_start = hint_addr;
			res_valid_size = valid_size;
			break;
		}
	}

	if (!new_va_block) {
		dev_err(hdev->dev, "no available va block for size %llu\n",
				size);
		goto out;
	}

	if (res_valid_start > new_va_block->start) {
		prev_start = new_va_block->start;
		prev_end = res_valid_start - 1;

		new_va_block->start = res_valid_start;
		new_va_block->size = res_valid_size;

		add_prev = true;
	}

	if (new_va_block->size > size) {
		new_va_block->start += size;
		new_va_block->size = new_va_block->end - new_va_block->start;
	} else {
		list_del(&new_va_block->node);
		kfree(new_va_block);
	}

	if (add_prev)
		add_va_block_locked(hdev, &va_range->list, prev_start,
				prev_end);

	print_va_list_locked(hdev, &va_range->list);
out:
	mutex_unlock(&va_range->lock);

	return res_valid_start;
}

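/*
 * Illustrative sketch with hypothetical numbers (not part of the driver): with
 * free chunks of 8MB and 3MB and a 2MB request, get_va_block() prefers the
 * 3MB chunk because it is the smallest block that still fits (best fit). If
 * the caller passes a hint address that lies inside one of the chunks and the
 * requested size fits there, the hint is honored instead and the returned VA
 * is the hint itself.
 */
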
/*
 * get_sg_info - get number of pages and the DMA address from SG list
 *
 * @sg : the SG list
 * @dma_addr : pointer to DMA address to return
 *
 * Calculate the number of consecutive pages described by the SG list. Take the
 * offset of the address within the first page, add the length, and round the
 * result up to the number of needed pages.
 */
static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
{
	*dma_addr = sg_dma_address(sg);

	return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
			(PAGE_SIZE - 1)) >> PAGE_SHIFT;
}

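/*
 * Worked example (hypothetical values) for the calculation above, assuming
 * 4KB pages: a segment with dma_addr = 0x1000f00 and length 0x2200 covers
 * 0xf00 + 0x2200 = 0x3100 bytes from the start of its first page, which
 * rounds up to (0x3100 + 0xfff) >> 12 = 4 pages.
 */
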
/*
 * init_phys_pg_pack_from_userptr - initialize physical page pack from host
 *                                  memory
 *
 * @ctx : current context
 * @userptr : userptr to initialize from
 * @pphys_pg_pack : res pointer
 *
 * This function does the following:
 * - Pin the physical pages related to the given virtual block
 * - Create a physical page pack from the physical pages related to the given
 *   virtual block
 */
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
		struct hl_userptr *userptr,
		struct hl_vm_phys_pg_pack **pphys_pg_pack)
{
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct scatterlist *sg;
	dma_addr_t dma_addr;
	u64 page_mask, total_npages = 0;
	u32 npages, page_size = PAGE_SIZE;
	bool first = true, is_huge_page_opt = true;
	int rc, i, j;

	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
	if (!phys_pg_pack)
		return -ENOMEM;

	phys_pg_pack->vm_type = userptr->vm_type;
	phys_pg_pack->created_from_userptr = true;
	phys_pg_pack->asid = ctx->asid;
	atomic_set(&phys_pg_pack->mapping_cnt, 1);

	/* Only if all dma_addrs are aligned to 2MB and their
	 * sizes are at least 2MB, we can use huge page mapping.
	 * We limit the 2MB optimization to this condition,
	 * since later on we acquire the related VA range as one
	 * consecutive block.
	 */
	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
		npages = get_sg_info(sg, &dma_addr);

		total_npages += npages;

		if (first) {
			first = false;
			dma_addr &= PAGE_MASK_2MB;
		}

		if ((npages % PGS_IN_2MB_PAGE) ||
					(dma_addr & (PAGE_SIZE_2MB - 1)))
			is_huge_page_opt = false;
	}

	if (is_huge_page_opt) {
		page_size = PAGE_SIZE_2MB;
		total_npages /= PGS_IN_2MB_PAGE;
	}

	page_mask = ~(((u64) page_size) - 1);

	phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
						GFP_KERNEL);
	if (!phys_pg_pack->pages) {
		rc = -ENOMEM;
		goto page_pack_arr_mem_err;
	}

	phys_pg_pack->npages = total_npages;
	phys_pg_pack->page_size = page_size;
	phys_pg_pack->total_size = total_npages * page_size;

	j = 0;
	first = true;
	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
		npages = get_sg_info(sg, &dma_addr);

		/* align down to physical page size and save the offset */
		if (first) {
			first = false;
			phys_pg_pack->offset = dma_addr & (page_size - 1);
			dma_addr &= page_mask;
		}

		while (npages) {
			phys_pg_pack->pages[j++] = dma_addr;
			dma_addr += page_size;

			if (is_huge_page_opt)
				npages -= PGS_IN_2MB_PAGE;
			else
				npages--;
		}
	}

	*pphys_pg_pack = phys_pg_pack;

	return 0;

page_pack_arr_mem_err:
	kfree(phys_pg_pack);

	return rc;
}

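/*
 * Worked example (hypothetical values) for the 2MB optimization decision
 * above: with 4KB base pages, PGS_IN_2MB_PAGE is 512. A pinned region whose
 * SG segments are all 2MB-aligned and whose page counts are all multiples of
 * 512 (e.g. two segments of 1024 pages each) is mapped with 2MB pages, so
 * total_npages becomes 2048 / 512 = 4. A single misaligned or short segment
 * makes is_huge_page_opt false and the mapping falls back to 4KB pages.
 */
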
/*
 * map_phys_page_pack - maps the physical page pack
 *
 * @ctx : current context
 * @vaddr : start address of the virtual area to map from
 * @phys_pg_pack : the pack of physical pages to map to
 *
 * This function does the following:
 * - Maps each chunk of virtual memory to matching physical chunk
 * - Stores number of successful mappings in the given argument
 * - Returns 0 on success, error code otherwise.
 */
static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
		struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_device *hdev = ctx->hdev;
	u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
	u32 page_size = phys_pg_pack->page_size;
	int rc = 0;

	for (i = 0 ; i < phys_pg_pack->npages ; i++) {
		paddr = phys_pg_pack->pages[i];

		/* For accessing the host we need to turn on bit 39 */
		if (phys_pg_pack->created_from_userptr)
			paddr += hdev->asic_prop.host_phys_base_address;

		rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
		if (rc) {
			dev_err(hdev->dev,
				"map failed for handle %u, npages: %llu, mapped: %llu",
				phys_pg_pack->handle, phys_pg_pack->npages,
				mapped_pg_cnt);
			goto err;
		}

		mapped_pg_cnt++;
		next_vaddr += page_size;
	}

	return 0;

err:
	next_vaddr = vaddr;
	for (i = 0 ; i < mapped_pg_cnt ; i++) {
		if (hl_mmu_unmap(ctx, next_vaddr, page_size))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
					phys_pg_pack->handle, next_vaddr,
					phys_pg_pack->pages[i], page_size);

		next_vaddr += page_size;
	}

	return rc;
}

static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
		u64 *paddr)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u32 handle;

	handle = lower_32_bits(args->map_device.handle);
	spin_lock(&vm->idr_lock);
	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
	if (!phys_pg_pack) {
		spin_unlock(&vm->idr_lock);
		dev_err(hdev->dev, "no match for handle %u\n", handle);
		return -EFAULT;
	}

	*paddr = phys_pg_pack->pages[0];

	spin_unlock(&vm->idr_lock);

	return 0;
}

/*
 * map_device_va - map the given memory
 *
 * @ctx : current context
 * @args : host parameters with handle/host virtual address
 * @device_addr : pointer to result device virtual address
 *
 * This function does the following:
 * - If given a physical device memory handle, map to a device virtual block
 *   and return the start address of this block
 * - If given a host virtual address and size, find the related physical pages,
 *   map a device virtual block to these pages and return the start address of
 *   this block
 */
static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
		u64 *device_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct hl_userptr *userptr = NULL;
	struct hl_vm_hash_node *hnode;
	enum vm_type_t *vm_type;
	u64 ret_vaddr, hint_addr;
	u32 handle = 0;
	int rc;
	bool is_userptr = args->flags & HL_MEM_USERPTR;

	if (is_userptr) {
		rc = get_userptr_from_host_va(hdev, args, &userptr);
		if (rc) {
			dev_err(hdev->dev, "failed to get userptr from va\n");
			return rc;
		}

		rc = init_phys_pg_pack_from_userptr(ctx, userptr,
				&phys_pg_pack);
		if (rc) {
			dev_err(hdev->dev,
				"unable to init page pack for vaddr 0x%llx\n",
				args->map_host.host_virt_addr);
			goto init_page_pack_err;
		}

		vm_type = (enum vm_type_t *) userptr;
		hint_addr = args->map_host.hint_addr;
	} else {
		handle = lower_32_bits(args->map_device.handle);

		spin_lock(&vm->idr_lock);
		phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
		if (!phys_pg_pack) {
			spin_unlock(&vm->idr_lock);
			dev_err(hdev->dev,
				"no match for handle %u\n", handle);
			return -EINVAL;
		}

		/* increment now to avoid freeing device memory while mapping */
		atomic_inc(&phys_pg_pack->mapping_cnt);

		spin_unlock(&vm->idr_lock);

		vm_type = (enum vm_type_t *) phys_pg_pack;

		hint_addr = args->map_device.hint_addr;
	}

	/*
	 * relevant for mapping device physical memory only, as host memory is
	 * implicitly shared
	 */
	if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
			phys_pg_pack->asid != ctx->asid) {
		dev_err(hdev->dev,
			"Failed to map memory, handle %u is not shared\n",
			handle);
		rc = -EPERM;
		goto shared_err;
	}

	hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
	if (!hnode) {
		rc = -ENOMEM;
		goto hnode_err;
	}

	ret_vaddr = get_va_block(hdev,
			is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
			phys_pg_pack->total_size, hint_addr, is_userptr);
	if (!ret_vaddr) {
		dev_err(hdev->dev, "no available va block for handle %u\n",
				handle);
		rc = -ENOMEM;
		goto va_block_err;
	}

	mutex_lock(&ctx->mmu_lock);

	rc = map_phys_page_pack(ctx, ret_vaddr, phys_pg_pack);
	if (rc) {
		mutex_unlock(&ctx->mmu_lock);
		dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
				handle);
		goto map_err;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, false);

	mutex_unlock(&ctx->mmu_lock);

	ret_vaddr += phys_pg_pack->offset;

	hnode->ptr = vm_type;
	hnode->vaddr = ret_vaddr;

	mutex_lock(&ctx->mem_hash_lock);
	hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
	mutex_unlock(&ctx->mem_hash_lock);

	*device_addr = ret_vaddr;

	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);

	return 0;

map_err:
	if (add_va_block(hdev,
			is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
			ret_vaddr,
			ret_vaddr + phys_pg_pack->total_size - 1))
		dev_warn(hdev->dev,
			"release va block failed for handle 0x%x, vaddr: 0x%llx\n",
				handle, ret_vaddr);

va_block_err:
	kfree(hnode);
hnode_err:
shared_err:
	atomic_dec(&phys_pg_pack->mapping_cnt);
	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);
init_page_pack_err:
	if (is_userptr)
		free_userptr(hdev, userptr);

	return rc;
}

/*
 * unmap_device_va - unmap the given device virtual address
 *
 * @ctx : current context
 * @vaddr : device virtual address to unmap
 *
 * This function does the following:
 * - Unmap the physical pages related to the given virtual address
 * - Return the device virtual block to the virtual block list
 */
static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
	struct hl_vm_hash_node *hnode = NULL;
	struct hl_userptr *userptr = NULL;
	enum vm_type_t *vm_type;
	u64 next_vaddr, i;
	u32 page_size;
	bool is_userptr;
	int rc;

	/* protect from double entrance */
	mutex_lock(&ctx->mem_hash_lock);
	hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
		if (vaddr == hnode->vaddr)
			break;

	if (!hnode) {
		mutex_unlock(&ctx->mem_hash_lock);
		dev_err(hdev->dev,
			"unmap failed, no mem hnode for vaddr 0x%llx\n",
			vaddr);
		return -EINVAL;
	}

	hash_del(&hnode->node);
	mutex_unlock(&ctx->mem_hash_lock);

	vm_type = hnode->ptr;

	if (*vm_type == VM_TYPE_USERPTR) {
		is_userptr = true;
		userptr = hnode->ptr;
		rc = init_phys_pg_pack_from_userptr(ctx, userptr,
				&phys_pg_pack);
		if (rc) {
			dev_err(hdev->dev,
				"unable to init page pack for vaddr 0x%llx\n",
				vaddr);
			goto vm_type_err;
		}
	} else if (*vm_type == VM_TYPE_PHYS_PACK) {
		is_userptr = false;
		phys_pg_pack = hnode->ptr;
	} else {
		dev_warn(hdev->dev,
			"unmap failed, unknown vm desc for vaddr 0x%llx\n",
				vaddr);
		rc = -EFAULT;
		goto vm_type_err;
	}

	if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
		dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
		rc = -EINVAL;
		goto mapping_cnt_err;
	}

	page_size = phys_pg_pack->page_size;
	vaddr &= ~(((u64) page_size) - 1);

	next_vaddr = vaddr;

	mutex_lock(&ctx->mmu_lock);

	for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size)
		if (hl_mmu_unmap(ctx, next_vaddr, page_size))
			dev_warn_ratelimited(hdev->dev,
				"unmap failed for vaddr: 0x%llx\n", next_vaddr);

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true);

	mutex_unlock(&ctx->mmu_lock);

	if (add_va_block(hdev,
			is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
			vaddr,
			vaddr + phys_pg_pack->total_size - 1))
		dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n",
				vaddr);

	atomic_dec(&phys_pg_pack->mapping_cnt);
	kfree(hnode);

	if (is_userptr) {
		free_phys_pg_pack(hdev, phys_pg_pack);
		free_userptr(hdev, userptr);
	}

	return 0;

mapping_cnt_err:
	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);
vm_type_err:
	mutex_lock(&ctx->mem_hash_lock);
	hash_add(ctx->mem_hash, &hnode->node, vaddr);
	mutex_unlock(&ctx->mem_hash_lock);

	return rc;
}

int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_mem_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u64 device_addr = 0;
	u32 handle = 0;
	int rc;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't execute memory IOCTL\n");
		return -EBUSY;
	}

	if (hdev->mmu_enable) {
		switch (args->in.op) {
		case HL_MEM_OP_ALLOC:
			if (!hdev->dram_supports_virtual_memory) {
				dev_err(hdev->dev,
					"DRAM alloc is not supported\n");
				rc = -EINVAL;
				goto out;
			}
			if (args->in.alloc.mem_size == 0) {
				dev_err(hdev->dev,
					"alloc size must be larger than 0\n");
				rc = -EINVAL;
				goto out;
			}
			rc = alloc_device_memory(ctx, &args->in, &handle);

			memset(args, 0, sizeof(*args));
			args->out.handle = (__u64) handle;
			break;

		case HL_MEM_OP_FREE:
			if (!hdev->dram_supports_virtual_memory) {
				dev_err(hdev->dev,
					"DRAM free is not supported\n");
				rc = -EINVAL;
				goto out;
			}
			rc = free_device_memory(ctx, args->in.free.handle);
			break;

		case HL_MEM_OP_MAP:
			rc = map_device_va(ctx, &args->in, &device_addr);

			memset(args, 0, sizeof(*args));
			args->out.device_virt_addr = device_addr;
			break;

		case HL_MEM_OP_UNMAP:
			rc = unmap_device_va(ctx,
					args->in.unmap.device_virt_addr);
			break;

		default:
			dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
			rc = -ENOTTY;
			break;
		}
	} else {
		switch (args->in.op) {
		case HL_MEM_OP_ALLOC:
			if (args->in.alloc.mem_size == 0) {
				dev_err(hdev->dev,
					"alloc size must be larger than 0\n");
				rc = -EINVAL;
				goto out;
			}

			/* Force contiguous as there are no real MMU
			 * translations to overcome physical memory gaps
			 */
			args->in.flags |= HL_MEM_CONTIGUOUS;
			rc = alloc_device_memory(ctx, &args->in, &handle);

			memset(args, 0, sizeof(*args));
			args->out.handle = (__u64) handle;
			break;

		case HL_MEM_OP_FREE:
			rc = free_device_memory(ctx, args->in.free.handle);
			break;

		case HL_MEM_OP_MAP:
			if (args->in.flags & HL_MEM_USERPTR) {
				device_addr = args->in.map_host.host_virt_addr;
				rc = 0;
			} else {
				rc = get_paddr_from_handle(ctx, &args->in,
							&device_addr);
			}

			memset(args, 0, sizeof(*args));
			args->out.device_virt_addr = device_addr;
			break;

		case HL_MEM_OP_UNMAP:
			rc = 0;
			break;

		default:
			dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
			rc = -ENOTTY;
			break;
		}
	}

out:
	return rc;
}

/*
 * hl_pin_host_memory - pins a chunk of host memory
 *
 * @hdev : pointer to the habanalabs device structure
 * @addr : the user-space virtual address of the memory area
 * @size : the size of the memory area
 * @userptr : pointer to hl_userptr structure
 *
 * This function does the following:
 * - Pins the physical pages
 * - Creates an SG list from those pages
 */
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
			struct hl_userptr *userptr)
{
	u64 start, end;
	u32 npages, offset;
	int rc;

	if (!size) {
		dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
		return -EINVAL;
	}

	if (!access_ok((void __user *) (uintptr_t) addr, size)) {
		dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
		return -EFAULT;
	}

	/*
	 * If the combination of the address and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
			PAGE_ALIGN(addr + size) < (addr + size)) {
		dev_err(hdev->dev,
			"user pointer 0x%llx + %llu causes integer overflow\n",
			addr, size);
		return -EINVAL;
	}

	start = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	end = PAGE_ALIGN(addr + size);
	npages = (end - start) >> PAGE_SHIFT;

	userptr->size = size;
	userptr->addr = addr;
	userptr->dma_mapped = false;
	INIT_LIST_HEAD(&userptr->job_node);

	userptr->vec = frame_vector_create(npages);
	if (!userptr->vec) {
		dev_err(hdev->dev, "Failed to create frame vector\n");
		return -ENOMEM;
	}

	rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
				userptr->vec);
	if (rc != npages) {
		dev_err(hdev->dev,
			"Failed to map host memory, user ptr probably wrong\n");
		if (rc < 0)
			goto destroy_framevec;
		rc = -EFAULT;
		goto put_framevec;
	}

	if (frame_vector_to_pages(userptr->vec) < 0) {
		dev_err(hdev->dev,
			"Failed to translate frame vector to pages\n");
		rc = -EFAULT;
		goto put_framevec;
	}

	userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
	if (!userptr->sgt) {
		rc = -ENOMEM;
		goto put_framevec;
	}

	rc = sg_alloc_table_from_pages(userptr->sgt,
				frame_vector_pages(userptr->vec),
				npages, offset, size, GFP_ATOMIC);
	if (rc < 0) {
		dev_err(hdev->dev, "failed to create SG table from pages\n");
		goto free_sgt;
	}

	hl_debugfs_add_userptr(hdev, userptr);

	return 0;

free_sgt:
	kfree(userptr->sgt);
put_framevec:
	put_vaddr_frames(userptr->vec);
destroy_framevec:
	frame_vector_destroy(userptr->vec);

	return rc;
}

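/*
 * Worked example (hypothetical values) for the page arithmetic above,
 * assuming 4KB pages: pinning addr = 0x7f0000001200 with size = 0x2000 gives
 * start = 0x7f0000001000, offset = 0x200, end = 0x7f0000004000 and therefore
 * npages = (end - start) >> 12 = 3.
 */
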
/*
 * hl_unpin_host_memory - unpins a chunk of host memory
 *
 * @hdev : pointer to the habanalabs device structure
 * @userptr : pointer to hl_userptr structure
 *
 * This function does the following:
 * - Unpins the physical pages related to the host memory
 * - Frees the SG list
 */
int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{
	struct page **pages;

	hl_debugfs_remove_userptr(hdev, userptr);

	if (userptr->dma_mapped)
		hdev->asic_funcs->hl_dma_unmap_sg(hdev,
				userptr->sgt->sgl,
				userptr->sgt->nents,
				userptr->dir);

	pages = frame_vector_pages(userptr->vec);
	if (!IS_ERR(pages)) {
		int i;

		for (i = 0; i < frame_vector_count(userptr->vec); i++)
			set_page_dirty_lock(pages[i]);
	}
	put_vaddr_frames(userptr->vec);
	frame_vector_destroy(userptr->vec);

	list_del(&userptr->job_node);

	sg_free_table(userptr->sgt);
	kfree(userptr->sgt);

	return 0;
}

/*
 * hl_userptr_delete_list - clear userptr list
 *
 * @hdev : pointer to the habanalabs device structure
 * @userptr_list : pointer to the list to clear
 *
 * This function does the following:
 * - Iterates over the list, unpins the host memory and frees the userptr
 *   structures
 */
void hl_userptr_delete_list(struct hl_device *hdev,
			struct list_head *userptr_list)
{
	struct hl_userptr *userptr, *tmp;

	list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
		hl_unpin_host_memory(hdev, userptr);
		kfree(userptr);
	}

	INIT_LIST_HEAD(userptr_list);
}

/*
 * hl_userptr_is_pinned - returns whether the given userptr is pinned
 *
 * @hdev : pointer to the habanalabs device structure
 * @addr : user address to check
 * @size : size of the area to check
 * @userptr_list : pointer to the list to search in
 * @userptr : pointer to userptr to check
 *
 * This function does the following:
 * - Iterates over the list and checks if the given userptr is in it, meaning
 *   it is pinned. If so, returns true, otherwise returns false.
 */
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
				u32 size, struct list_head *userptr_list,
				struct hl_userptr **userptr)
{
	list_for_each_entry((*userptr), userptr_list, job_node) {
		if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
			return true;
	}

	return false;
}

/*
 * hl_va_range_init - initialize virtual addresses range
 *
 * @hdev : pointer to the habanalabs device structure
 * @va_range : pointer to the range to initialize
 * @start : range start address
 * @end : range end address
 *
 * This function does the following:
 * - Initializes the virtual addresses list of the given range with the given
 *   addresses
 */
static int hl_va_range_init(struct hl_device *hdev,
		struct hl_va_range *va_range, u64 start, u64 end)
{
	int rc;

	INIT_LIST_HEAD(&va_range->list);

	/* PAGE_SIZE alignment */

	if (start & (PAGE_SIZE - 1)) {
		start &= PAGE_MASK;
		start += PAGE_SIZE;
	}

	if (end & (PAGE_SIZE - 1))
		end &= PAGE_MASK;

	if (start >= end) {
		dev_err(hdev->dev, "too small vm range for va list\n");
		return -EFAULT;
	}

	rc = add_va_block(hdev, va_range, start, end);
	if (rc) {
		dev_err(hdev->dev, "Failed to init host va list\n");
		return rc;
	}

	va_range->start_addr = start;
	va_range->end_addr = end;

	return 0;
}

/*
 * hl_vm_ctx_init_with_ranges - initialize virtual memory for context
 *
 * @ctx : pointer to the habanalabs context structure
 * @host_range_start : host virtual addresses range start
 * @host_range_end : host virtual addresses range end
 * @dram_range_start : dram virtual addresses range start
 * @dram_range_end : dram virtual addresses range end
 *
 * This function initializes the following:
 * - MMU for context
 * - Virtual address to area descriptor hashtable
 * - Virtual block list of available virtual memory
 */
static int hl_vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start,
				u64 host_range_end, u64 dram_range_start,
				u64 dram_range_end)
{
	struct hl_device *hdev = ctx->hdev;
	int rc;

	rc = hl_mmu_ctx_init(ctx);
	if (rc) {
		dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
		return rc;
	}

	mutex_init(&ctx->mem_hash_lock);
	hash_init(ctx->mem_hash);

	mutex_init(&ctx->host_va_range.lock);

	rc = hl_va_range_init(hdev, &ctx->host_va_range, host_range_start,
			host_range_end);
	if (rc) {
		dev_err(hdev->dev, "failed to init host vm range\n");
		goto host_vm_err;
	}

	mutex_init(&ctx->dram_va_range.lock);

	rc = hl_va_range_init(hdev, &ctx->dram_va_range, dram_range_start,
			dram_range_end);
	if (rc) {
		dev_err(hdev->dev, "failed to init dram vm range\n");
		goto dram_vm_err;
	}

	hl_debugfs_add_ctx_mem_hash(hdev, ctx);

	return 0;

dram_vm_err:
	mutex_destroy(&ctx->dram_va_range.lock);

	mutex_lock(&ctx->host_va_range.lock);
	clear_va_list_locked(hdev, &ctx->host_va_range.list);
	mutex_unlock(&ctx->host_va_range.lock);
host_vm_err:
	mutex_destroy(&ctx->host_va_range.lock);
	mutex_destroy(&ctx->mem_hash_lock);
	hl_mmu_ctx_fini(ctx);

	return rc;
}

int hl_vm_ctx_init(struct hl_ctx *ctx)
{
	struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
	u64 host_range_start, host_range_end, dram_range_start,
		dram_range_end;

	atomic64_set(&ctx->dram_phys_mem, 0);

	/*
	 * - If MMU is enabled, init the ranges as usual.
	 * - If MMU is disabled, in case of host mapping, the returned address
	 *   is the given one.
	 *   In case of DRAM mapping, the returned address is the physical
	 *   address of the memory related to the given handle.
	 */
	if (ctx->hdev->mmu_enable) {
		dram_range_start = prop->va_space_dram_start_address;
		dram_range_end = prop->va_space_dram_end_address;
		host_range_start = prop->va_space_host_start_address;
		host_range_end = prop->va_space_host_end_address;
	} else {
		dram_range_start = prop->dram_user_base_address;
		dram_range_end = prop->dram_end_address;
		host_range_start = prop->dram_user_base_address;
		host_range_end = prop->dram_end_address;
	}

	return hl_vm_ctx_init_with_ranges(ctx, host_range_start,
			host_range_end, dram_range_start, dram_range_end);
}

/*
 * hl_va_range_fini - clear a virtual addresses range
 *
 * @hdev : pointer to the habanalabs structure
 * @va_range : pointer to virtual addresses range
 *
 * This function does the following:
 * - Checks that the given range contains the whole initial range
 * - Frees the virtual addresses block list and its lock
 */
static void hl_va_range_fini(struct hl_device *hdev,
		struct hl_va_range *va_range)
{
	struct hl_vm_va_block *va_block;

	if (list_empty(&va_range->list)) {
		dev_warn(hdev->dev,
				"va list should not be empty on cleanup!\n");
		goto out;
	}

	if (!list_is_singular(&va_range->list)) {
		dev_warn(hdev->dev,
			"va list should not contain multiple blocks on cleanup!\n");
		goto free_va_list;
	}

	va_block = list_first_entry(&va_range->list, typeof(*va_block), node);

	if (va_block->start != va_range->start_addr ||
		va_block->end != va_range->end_addr) {
		dev_warn(hdev->dev,
			"wrong va block on cleanup, from 0x%llx to 0x%llx\n",
				va_block->start, va_block->end);
		goto free_va_list;
	}

free_va_list:
	mutex_lock(&va_range->lock);
	clear_va_list_locked(hdev, &va_range->list);
	mutex_unlock(&va_range->lock);

out:
	mutex_destroy(&va_range->lock);
}

/*
 * hl_vm_ctx_fini - virtual memory teardown of context
 *
 * @ctx : pointer to the habanalabs context structure
 *
 * This function performs teardown of the following:
 * - Virtual block list of available virtual memory
 * - Virtual address to area descriptor hashtable
 * - MMU for context
 *
 * In addition this function does the following:
 * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
 *   hashtable should be empty as no valid mappings should exist at this
 *   point.
 * - Frees any existing physical page list from the idr which relates to the
 *   current context asid.
 * - This function checks the virtual block list for correctness. At this point
 *   the list should contain one element which describes the whole virtual
 *   memory range of the context. Otherwise, a warning is printed.
 */
void hl_vm_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_list;
	struct hl_vm_hash_node *hnode;
	struct hlist_node *tmp_node;
	int i;

	hl_debugfs_remove_ctx_mem_hash(hdev, ctx);

	if (!hash_empty(ctx->mem_hash))
		dev_notice(hdev->dev, "ctx is freed while it has va in use\n");

	hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
		dev_dbg(hdev->dev,
			"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
			hnode->vaddr, ctx->asid);
		unmap_device_va(ctx, hnode->vaddr);
	}

	spin_lock(&vm->idr_lock);
	idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
		if (phys_pg_list->asid == ctx->asid) {
			dev_dbg(hdev->dev,
				"page list 0x%p of asid %d is still alive\n",
				phys_pg_list, ctx->asid);
			free_phys_pg_pack(hdev, phys_pg_list);
			idr_remove(&vm->phys_pg_pack_handles, i);
		}
	spin_unlock(&vm->idr_lock);

	hl_va_range_fini(hdev, &ctx->dram_va_range);
	hl_va_range_fini(hdev, &ctx->host_va_range);

	mutex_destroy(&ctx->mem_hash_lock);
	hl_mmu_ctx_fini(ctx);
}

/*
 * hl_vm_init - initialize virtual memory module
 *
 * @hdev : pointer to the habanalabs device structure
 *
 * This function initializes the following:
 * - MMU module
 * - DRAM physical pages pool of 2MB
 * - Idr for device memory allocation handles
 */
int hl_vm_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm *vm = &hdev->vm;
	int rc;

	rc = hl_mmu_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to init MMU\n");
		return rc;
	}

	vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
	if (!vm->dram_pg_pool) {
		dev_err(hdev->dev, "Failed to create dram page pool\n");
		rc = -ENOMEM;
		goto pool_create_err;
	}

	kref_init(&vm->dram_pg_pool_refcount);

	rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
			prop->dram_end_address - prop->dram_user_base_address,
			-1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to dram page pool %d\n", rc);
		goto pool_add_err;
	}

	spin_lock_init(&vm->idr_lock);
	idr_init(&vm->phys_pg_pack_handles);

	atomic64_set(&hdev->dram_used_mem, 0);

	vm->init_done = true;

	return 0;

pool_add_err:
	gen_pool_destroy(vm->dram_pg_pool);
pool_create_err:
	hl_mmu_fini(hdev);

	return rc;
}

/*
 * hl_vm_fini - virtual memory module teardown
 *
 * @hdev : pointer to the habanalabs device structure
 *
 * This function performs teardown of the following:
 * - Idr for device memory allocation handles
 * - DRAM physical pages pool of 2MB
 * - MMU module
 */
void hl_vm_fini(struct hl_device *hdev)
{
	struct hl_vm *vm = &hdev->vm;

	if (!vm->init_done)
		return;

	/*
	 * At this point all the contexts should be freed and hence no DRAM
	 * memory should be in use. Hence the DRAM pool should be freed here.
	 */
	if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
		dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
				__func__);

	hl_mmu_fini(hdev);

	vm->init_done = false;
}