/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
/*
 * This file contains functions for buffer object structure management
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gfp.h>		/* for GFP_ATOMIC */
#include <linux/mm.h>		/* for find_vma() and friends */
#include <linux/mm_types.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/slab.h>		/* for kmalloc */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>

#include <asm/current.h>
#include <linux/sched/signal.h>
#include <linux/file.h>

#include <asm/set_memory.h>

#include "atomisp_internal.h"
#include "hmm/hmm_common.h"
#include "hmm/hmm_pool.h"
#include "hmm/hmm_bo.h"
static unsigned int order_to_nr(unsigned int order)
{
	return 1U << order;
}

static unsigned int nr_to_order_bottom(unsigned int nr)
{
	return fls(nr) - 1;
}
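/*
 * Illustrative note (editor's addition, not part of the driver): these two
 * helpers round a page count down to a power-of-two block. For example,
 * for pgnr = 6, nr_to_order_bottom(6) = fls(6) - 1 = 2 and
 * order_to_nr(2) = 4, so a 6-page request is served by one 4-page block,
 * with the remaining 2 pages allocated on the next loop iteration in
 * alloc_private_pages() below.
 */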
static struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
{
	struct hmm_buffer_object *bo;

	bo = kmem_cache_alloc(bo_cache, GFP_KERNEL);
	if (!bo)
		dev_err(atomisp_dev, "%s: failed!\n", __func__);

	return bo;
}
static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
		     unsigned int pgnr)
{
	check_bodev_null_return(bdev, -EINVAL);
	var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
			 "hmm_bo_device not inited yet.\n");
	/* prevent zero size buffer object */
	if (pgnr == 0) {
		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
		return -EINVAL;
	}

	memset(bo, 0, sizeof(*bo));
	mutex_init(&bo->mutex);

	/* init the bo->list HEAD as an element of entire_bo_list */
	INIT_LIST_HEAD(&bo->list);

	bo->bdev = bdev;
	bo->vmap_addr = NULL;
	bo->status = HMM_BO_FREE;
	bo->start = bdev->start;
	bo->pgnr = pgnr;
	bo->end = bo->start + pgnr_to_size(pgnr);
	bo->prev = NULL;
	bo->next = NULL;

	return 0;
}
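/*
 * Illustrative note (editor's addition, not part of the driver): a bo covers
 * pgnr_to_size(pgnr) == pgnr << PAGE_SHIFT bytes of ISP virtual address
 * space, so with 4 KiB pages a bo initialized with pgnr = 16 satisfies
 * bo->end - bo->start == 0x10000.
 */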
static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
	struct rb_node *node, unsigned int pgnr)
{
	struct hmm_buffer_object *this, *ret_bo, *temp_bo;

	this = rb_entry(node, struct hmm_buffer_object, node);
	if (this->pgnr == pgnr ||
	    (this->pgnr > pgnr && this->node.rb_left == NULL)) {
		goto remove_bo_and_return;
	} else {
		if (this->pgnr < pgnr) {
			if (!this->node.rb_right)
				return NULL;
			ret_bo = __bo_search_and_remove_from_free_rbtree(
				this->node.rb_right, pgnr);
		} else {
			ret_bo = __bo_search_and_remove_from_free_rbtree(
				this->node.rb_left, pgnr);
		}
		if (!ret_bo) {
			if (this->pgnr > pgnr)
				goto remove_bo_and_return;
			else
				return NULL;
		}
		return ret_bo;
	}

remove_bo_and_return:
	/* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
	 * 1. check if 'this->next' is NULL:
	 *    yes: erase 'this' node and rebalance rbtree, return 'this'.
	 */
	if (this->next == NULL) {
		rb_erase(&this->node, &this->bdev->free_rbtree);
		return this;
	}
	/* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
	 * 2. check if 'this->next->next' is NULL:
	 *    yes: change the related 'next/prev' pointers,
	 *         return 'this->next' but the rbtree stays unchanged.
	 */
	temp_bo = this->next;
	this->next = temp_bo->next;
	if (temp_bo->next)
		temp_bo->next->prev = this;
	temp_bo->next = NULL;
	temp_bo->prev = NULL;
	return temp_bo;
}
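/*
 * Illustrative sketch (editor's addition, not part of the driver): free bos
 * of equal pgnr share a single rbtree node; the surplus ones hang off it as
 * a doubly linked list whose head (the tree node itself) has prev == NULL:
 *
 *	rbtree node (pgnr = 4) --next--> bo (pgnr = 4) --next--> NULL
 *	       prev = NULL     <--prev--
 *
 * The helper above therefore prefers returning this->next (plain list
 * removal, no rebalancing) and only calls rb_erase() when the list behind
 * the node is empty.
 */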
static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
	ia_css_ptr start)
{
	struct rb_node *n = root->rb_node;
	struct hmm_buffer_object *bo;

	do {
		bo = rb_entry(n, struct hmm_buffer_object, node);

		if (bo->start > start) {
			if (n->rb_left == NULL)
				return NULL;
			n = n->rb_left;
		} else if (bo->start < start) {
			if (n->rb_right == NULL)
				return NULL;
			n = n->rb_right;
		} else {
			return bo;
		}
	} while (n);

	return NULL;
}
static struct hmm_buffer_object *__bo_search_by_addr_in_range(
	struct rb_root *root, unsigned int start)
{
	struct rb_node *n = root->rb_node;
	struct hmm_buffer_object *bo;

	do {
		bo = rb_entry(n, struct hmm_buffer_object, node);

		if (bo->start > start) {
			if (n->rb_left == NULL)
				return NULL;
			n = n->rb_left;
		} else {
			if (bo->end > start)
				return bo;
			if (n->rb_right == NULL)
				return NULL;
			n = n->rb_right;
		}
	} while (n);

	return NULL;
}
static void __bo_insert_to_free_rbtree(struct rb_root *root,
	struct hmm_buffer_object *bo)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct hmm_buffer_object *this;
	unsigned int pgnr = bo->pgnr;

	while (*new) {
		parent = *new;
		this = container_of(*new, struct hmm_buffer_object, node);

		if (pgnr < this->pgnr) {
			new = &((*new)->rb_left);
		} else if (pgnr > this->pgnr) {
			new = &((*new)->rb_right);
		} else {
			/* equal-size bos queue up behind the tree node */
			bo->prev = this;
			bo->next = this->next;
			if (this->next)
				this->next->prev = bo;
			this->next = bo;
			bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
			return;
		}
	}

	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;

	rb_link_node(&bo->node, parent, new);
	rb_insert_color(&bo->node, root);
}
static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
	struct hmm_buffer_object *bo)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct hmm_buffer_object *this;
	unsigned int start = bo->start;

	while (*new) {
		parent = *new;
		this = container_of(*new, struct hmm_buffer_object, node);

		if (start < this->start)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	kref_init(&bo->kref);
	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;

	rb_link_node(&bo->node, parent, new);
	rb_insert_color(&bo->node, root);
}
static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
	struct hmm_buffer_object *bo,
	unsigned int pgnr)
{
	struct hmm_buffer_object *new_bo;
	unsigned long flags;
	int ret;

	new_bo = __bo_alloc(bdev->bo_cache);
	if (!new_bo) {
		dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
		return NULL;
	}
	ret = __bo_init(bdev, new_bo, pgnr);
	if (ret) {
		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
		kmem_cache_free(bdev->bo_cache, new_bo);
		return NULL;
	}

	new_bo->start = bo->start;
	new_bo->end = new_bo->start + pgnr_to_size(pgnr);
	bo->start = new_bo->end;
	bo->pgnr = bo->pgnr - pgnr;

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_add_tail(&new_bo->list, &bo->list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	return new_bo;
}
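/*
 * Illustrative walkthrough (editor's addition, not part of the driver):
 * breaking pgnr = 4 pages off a free bo spanning [0x1000, 0x9000) with
 * pgnr = 8 yields, assuming 4 KiB pages:
 *
 *	new_bo: start = 0x1000, end = 0x5000, pgnr = 4  (returned)
 *	bo:     start = 0x5000, end = 0x9000, pgnr = 4  (stays free)
 *
 * list_add_tail(&new_bo->list, &bo->list) keeps entire_bo_list sorted by
 * address, which is what lets hmm_bo_release() merge neighbors later.
 */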
static void __bo_take_off_handling(struct hmm_buffer_object *bo)
{
	struct hmm_bo_device *bdev = bo->bdev;
	/* There are 4 situations when we take off a known bo from free rbtree:
	 * 1. if bo->next == NULL && bo->prev == NULL, bo is a rbtree node
	 *	and does not have a linked list after bo; to take off this bo,
	 *	we just need erase bo directly and rebalance the free rbtree
	 */
	if (bo->prev == NULL && bo->next == NULL) {
		rb_erase(&bo->node, &bdev->free_rbtree);
	/* 2. when bo->next != NULL && bo->prev == NULL, bo is a rbtree node,
	 *	and has a linked list; to take off this bo we need erase bo
	 *	first, then insert bo->next into free rbtree and rebalance
	 */
	} else if (bo->prev == NULL && bo->next != NULL) {
		bo->next->prev = NULL;
		rb_erase(&bo->node, &bdev->free_rbtree);
		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
		bo->next = NULL;
	/* 3. when bo->prev != NULL && bo->next == NULL, bo is not a rbtree
	 *	node, bo is the last element of the linked list after rbtree
	 *	node; to take off this bo, we just need set the "prev/next"
	 *	pointers to NULL, the free rbtree stays unchanged
	 */
	} else if (bo->prev != NULL && bo->next == NULL) {
		bo->prev->next = NULL;
		bo->prev = NULL;
	/* 4. when bo->prev != NULL && bo->next != NULL, bo is not a rbtree
	 *	node, bo is in the middle of the linked list after rbtree node;
	 *	to take off this bo, we just relink its neighbors' "prev/next"
	 *	pointers, the free rbtree stays unchanged
	 */
	} else {
		bo->next->prev = bo->prev;
		bo->prev->next = bo->next;
		bo->next = NULL;
		bo->prev = NULL;
	}
}
static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
	struct hmm_buffer_object *next_bo)
{
	struct hmm_bo_device *bdev;
	unsigned long flags;

	bdev = bo->bdev;
	next_bo->start = bo->start;
	next_bo->pgnr = next_bo->pgnr + bo->pgnr;

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_del(&bo->list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	kmem_cache_free(bo->bdev->bo_cache, bo);

	return next_bo;
}
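/*
 * Illustrative walkthrough (editor's addition, not part of the driver):
 * merging is the inverse of __bo_break_up(). Given two address-adjacent bos
 * bo = [0x1000, 0x5000) and next_bo = [0x5000, 0x9000), __bo_merge(bo,
 * next_bo) frees bo and leaves next_bo = [0x1000, 0x9000) with pgnr = 8,
 * assuming 4 KiB pages. The free neighbor must first be taken off the free
 * rbtree via __bo_take_off_handling(), as hmm_bo_release() does below.
 */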
/*
 * hmm_bo_device functions.
 */
int hmm_bo_device_init(struct hmm_bo_device *bdev,
		       struct isp_mmu_client *mmu_driver,
		       unsigned int vaddr_start,
		       unsigned int size)
{
	struct hmm_buffer_object *bo;
	unsigned long flags;
	int ret;

	check_bodev_null_return(bdev, -EINVAL);

	ret = isp_mmu_init(&bdev->mmu, mmu_driver);
	if (ret) {
		dev_err(atomisp_dev, "isp_mmu_init failed.\n");
		return ret;
	}

	bdev->start = vaddr_start;
	bdev->pgnr = size_to_pgnr_ceil(size);
	bdev->size = pgnr_to_size(bdev->pgnr);

	spin_lock_init(&bdev->list_lock);
	mutex_init(&bdev->rbtree_mutex);

	bdev->flag = HMM_BO_DEVICE_INITED;

	INIT_LIST_HEAD(&bdev->entire_bo_list);
	bdev->allocated_rbtree = RB_ROOT;
	bdev->free_rbtree = RB_ROOT;

	bdev->bo_cache = kmem_cache_create("bo_cache",
					   sizeof(struct hmm_buffer_object),
					   0, 0, NULL);
	if (!bdev->bo_cache) {
		dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
		isp_mmu_exit(&bdev->mmu);
		return -ENOMEM;
	}

	bo = __bo_alloc(bdev->bo_cache);
	if (!bo) {
		dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
		isp_mmu_exit(&bdev->mmu);
		return -ENOMEM;
	}

	ret = __bo_init(bdev, bo, bdev->pgnr);
	if (ret) {
		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
		kmem_cache_free(bdev->bo_cache, bo);
		isp_mmu_exit(&bdev->mmu);
		return -EINVAL;
	}

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_add_tail(&bo->list, &bdev->entire_bo_list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

	return 0;
}
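/*
 * Usage sketch (editor's addition, not part of the driver; the identifiers
 * my_bdev, my_mmu_driver, ISP_VM_START and ISP_VM_SIZE are hypothetical):
 *
 *	static struct hmm_bo_device my_bdev;
 *
 *	if (hmm_bo_device_init(&my_bdev, &my_mmu_driver,
 *			       ISP_VM_START, ISP_VM_SIZE))
 *		return -ENOMEM;
 *	...
 *	hmm_bo_device_exit(&my_bdev);
 *
 * After init, the whole [vaddr_start, vaddr_start + size) range sits in the
 * free rbtree as one big bo that later allocations break up.
 */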
struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
				       unsigned int pgnr)
{
	struct hmm_buffer_object *bo, *new_bo;
	struct rb_root *root = &bdev->free_rbtree;

	check_bodev_null_return(bdev, NULL);
	var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
			 "hmm_bo_device not inited yet.\n");

	if (pgnr == 0) {
		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
		return NULL;
	}

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed",
			__func__);
		return NULL;
	}

	if (bo->pgnr > pgnr) {
		new_bo = __bo_break_up(bdev, bo, pgnr);
		if (!new_bo) {
			mutex_unlock(&bdev->rbtree_mutex);
			dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
				__func__);
			return NULL;
		}

		__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

		mutex_unlock(&bdev->rbtree_mutex);
		return new_bo;
	}

	__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);

	mutex_unlock(&bdev->rbtree_mutex);
	return bo;
}
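/*
 * Usage sketch (editor's addition, not part of the driver; my_bdev is
 * hypothetical):
 *
 *	struct hmm_buffer_object *bo = hmm_bo_alloc(&my_bdev, 16);
 *
 *	if (!bo)
 *		return -ENOMEM;
 *	...
 *	hmm_bo_unref(bo);
 *
 * hmm_bo_alloc() only reserves ISP virtual address space (16 pages here);
 * backing pages come from hmm_bo_alloc_pages(). Release normally goes
 * through the kref: hmm_bo_unref() invokes hmm_bo_release() when the last
 * reference is dropped.
 */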
void hmm_bo_release(struct hmm_buffer_object *bo)
{
	struct hmm_bo_device *bdev = bo->bdev;
	struct hmm_buffer_object *next_bo, *prev_bo;

	mutex_lock(&bdev->rbtree_mutex);

	/*
	 * FIX ME:
	 *
	 * how to destroy the bo when it is still MMAPED?
	 *
	 * ideally, this will not happen as hmm_bo_release
	 * will only be called when kref reaches 0, and in mmap
	 * operation the hmm_bo_ref will eventually be called.
	 * so, if this happens, something went wrong.
	 */
	if (bo->status & HMM_BO_MMAPED) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
		return;
	}

	if (bo->status & HMM_BO_BINDED) {
		dev_warn(atomisp_dev, "the bo is still bound, unbind it first...\n");
		hmm_bo_unbind(bo);
	}

	if (bo->status & HMM_BO_PAGE_ALLOCED) {
		dev_warn(atomisp_dev, "the pages are not freed, free pages first\n");
		hmm_bo_free_pages(bo);
	}
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
		hmm_bo_vunmap(bo);
	}

	rb_erase(&bo->node, &bdev->allocated_rbtree);

	prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
	next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);

	if (bo->list.prev != &bdev->entire_bo_list &&
	    prev_bo->end == bo->start &&
	    (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
		__bo_take_off_handling(prev_bo);
		bo = __bo_merge(prev_bo, bo);
	}

	if (bo->list.next != &bdev->entire_bo_list &&
	    next_bo->start == bo->end &&
	    (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
		__bo_take_off_handling(next_bo);
		bo = __bo_merge(bo, next_bo);
	}

	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

	mutex_unlock(&bdev->rbtree_mutex);
}
void hmm_bo_device_exit(struct hmm_bo_device *bdev)
{
	struct hmm_buffer_object *bo;
	unsigned long flags;

	dev_dbg(atomisp_dev, "%s: entering!\n", __func__);

	check_bodev_null_return_void(bdev);

	/*
	 * release all allocated bos even if they are in use;
	 * all bos will be merged into one big bo
	 */
	while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
		hmm_bo_release(
		    rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));

	dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
		__func__);

	/* free all bos to release all ISP virtual memory */
	while (!list_empty(&bdev->entire_bo_list)) {
		bo = list_to_hmm_bo(bdev->entire_bo_list.next);

		spin_lock_irqsave(&bdev->list_lock, flags);
		list_del(&bo->list);
		spin_unlock_irqrestore(&bdev->list_lock, flags);

		kmem_cache_free(bdev->bo_cache, bo);
	}

	dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);

	kmem_cache_destroy(bdev->bo_cache);

	isp_mmu_exit(&bdev->mmu);
}
int hmm_bo_device_inited(struct hmm_bo_device *bdev)
{
	check_bodev_null_return(bdev, -EINVAL);

	return bdev->flag == HMM_BO_DEVICE_INITED;
}
int hmm_bo_allocated(struct hmm_buffer_object *bo)
{
	check_bo_null_return(bo, 0);

	return bo->status & HMM_BO_ALLOCED;
}
struct hmm_buffer_object *hmm_bo_device_search_start(
	struct hmm_bo_device *bdev, ia_css_ptr vaddr)
{
	struct hmm_buffer_object *bo;

	check_bodev_null_return(bdev, NULL);

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s cannot find bo with addr: 0x%x\n",
			__func__, vaddr);
		return NULL;
	}
	mutex_unlock(&bdev->rbtree_mutex);

	return bo;
}
struct hmm_buffer_object *hmm_bo_device_search_in_range(
	struct hmm_bo_device *bdev, unsigned int vaddr)
{
	struct hmm_buffer_object *bo;

	check_bodev_null_return(bdev, NULL);

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s cannot find bo containing addr: 0x%x\n",
			__func__, vaddr);
		return NULL;
	}
	mutex_unlock(&bdev->rbtree_mutex);

	return bo;
}
struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
	struct hmm_bo_device *bdev, const void *vaddr)
{
	struct list_head *pos;
	struct hmm_buffer_object *bo;
	unsigned long flags;

	check_bodev_null_return(bdev, NULL);

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_for_each(pos, &bdev->entire_bo_list) {
		bo = list_to_hmm_bo(pos);
		/* pass bo which has no vm_node allocated */
		if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
			continue;
		if (bo->vmap_addr == vaddr)
			goto found;
	}
	spin_unlock_irqrestore(&bdev->list_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&bdev->list_lock, flags);
	return bo;
}
static void free_private_bo_pages(struct hmm_buffer_object *bo,
				  struct hmm_pool *dypool,
				  struct hmm_pool *repool,
				  int free_pgnr)
{
	int i, ret;

	for (i = 0; i < free_pgnr; i++) {
		switch (bo->page_obj[i].type) {
		case HMM_PAGE_TYPE_RESERVED:
			if (repool->pops
			    && repool->pops->pool_free_pages) {
				repool->pops->pool_free_pages(repool->pool_info,
							      &bo->page_obj[i]);
				hmm_mem_stat.res_cnt--;
			}
			break;
		/*
		 * HMM_PAGE_TYPE_GENERAL indicates that pages are from system
		 * memory, so when freeing them, they should be put into the
		 * dynamic pool.
		 */
		case HMM_PAGE_TYPE_DYNAMIC:
		case HMM_PAGE_TYPE_GENERAL:
			if (dypool->pops
			    && dypool->pops->pool_inited
			    && dypool->pops->pool_inited(dypool->pool_info)) {
				if (dypool->pops->pool_free_pages)
					dypool->pops->pool_free_pages(
							      dypool->pool_info,
							      &bo->page_obj[i]);
				break;
			}

			/*
			 * if dynamic memory pool doesn't exist, need to free
			 * pages to system directly.
			 */
			ret = set_pages_wb(bo->page_obj[i].page, 1);
			if (ret)
				dev_err(atomisp_dev,
					"set page to WB err ...ret = %d\n",
					ret);
			/*
			 * W/A: set_pages_wb seldom returns -EFAULT, which
			 * indicates that the page address is not in the valid
			 * range (0xffff880000000000~0xffffc7ffffffffff);
			 * __free_pages() would then panic. It is not known why
			 * the page address becomes invalid; it may be memory
			 * corruption caused by low memory.
			 */
			if (!ret) {
				__free_pages(bo->page_obj[i].page, 0);
				hmm_mem_stat.sys_size--;
			}
			break;
		default:
			break;
		}
	}
}
/* Allocate pages which will be used only by ISP */
static int alloc_private_pages(struct hmm_buffer_object *bo,
			       int from_highmem,
			       bool cached,
			       struct hmm_pool *dypool,
			       struct hmm_pool *repool)
{
	int ret;
	unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
	struct page *pages;
	gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
	int i, j;
	int failure_number = 0;
	bool reduce_order = false;
	bool lack_mem = true;

	if (from_highmem)
		gfp |= __GFP_HIGHMEM;

	pgnr = bo->pgnr;

	bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
				     GFP_KERNEL);
	if (unlikely(!bo->page_obj))
		return -ENOMEM;

	i = 0;
	alloc_pgnr = 0;

	/*
	 * get physical pages from dynamic pages pool.
	 */
	if (dypool->pops && dypool->pops->pool_alloc_pages) {
		alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
							    bo->page_obj, pgnr,
							    cached);
		hmm_mem_stat.dyc_size -= alloc_pgnr;

		if (alloc_pgnr == pgnr)
			return 0;
	}

	pgnr -= alloc_pgnr;
	i += alloc_pgnr;

	/*
	 * get physical pages from reserved pages pool for atomisp.
	 */
	if (repool->pops && repool->pops->pool_alloc_pages) {
		alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
							    &bo->page_obj[i],
							    pgnr, cached);
		hmm_mem_stat.res_cnt += alloc_pgnr;
		if (alloc_pgnr == pgnr)
			return 0;
	}

	pgnr -= alloc_pgnr;
	i += alloc_pgnr;

	while (pgnr) {
		order = nr_to_order_bottom(pgnr);
		/*
		 * if we are short of memory, we will set order to 0
		 * every time.
		 */
		if (lack_mem)
			order = HMM_MIN_ORDER;
		else if (order > HMM_MAX_ORDER)
			order = HMM_MAX_ORDER;
retry:
		/*
		 * When order > HMM_MIN_ORDER, for performance reasons we don't
		 * want alloc_pages() to sleep. In case it fails and falls back
		 * to HMM_MIN_ORDER, or in case the requested order is
		 * originally the minimum value, we can allow alloc_pages() to
		 * sleep for robustness purposes.
		 *
		 * REVISIT: why is __GFP_FS necessary?
		 */
		if (order == HMM_MIN_ORDER) {
			gfp &= ~GFP_NOWAIT;
			gfp |= __GFP_RECLAIM | __GFP_FS;
		}

		pages = alloc_pages(gfp, order);
		if (unlikely(!pages)) {
			/*
			 * in low memory case, if allocating pages fails, we
			 * turn to try if an order=0 allocation could succeed.
			 * if order=0 fails too, that means there is no
			 * memory left.
			 */
			if (order == HMM_MIN_ORDER) {
				dev_err(atomisp_dev,
					"%s: cannot allocate pages\n",
					__func__);
				goto cleanup;
			}
			order = HMM_MIN_ORDER;
			failure_number++;
			reduce_order = true;
			/*
			 * if it fails twice in a row, we consider
			 * ourselves short of memory now.
			 */
			if (failure_number == 2) {
				lack_mem = true;
				failure_number = 0;
			}
			goto retry;
		} else {
			blk_pgnr = order_to_nr(order);

			if (!cached) {
				/*
				 * set memory to uncacheable -- UC_MINUS
				 */
				ret = set_pages_uc(pages, blk_pgnr);
				if (ret) {
					dev_err(atomisp_dev,
						"set page uncacheable failed.\n");
					__free_pages(pages, order);
					goto cleanup;
				}
			}

			for (j = 0; j < blk_pgnr; j++) {
				bo->page_obj[i].page = pages + j;
				bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
			}

			pgnr -= blk_pgnr;
			hmm_mem_stat.sys_size += blk_pgnr;

			/*
			 * if order is not reduced this time, clear
			 * failure_number.
			 */
			if (reduce_order)
				reduce_order = false;
			else
				failure_number = 0;
		}
	}

	return 0;

cleanup:
	alloc_pgnr = i;
	free_private_bo_pages(bo, dypool, repool, alloc_pgnr);

	kfree(bo->page_obj);

	return -ENOMEM;
}
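/*
 * Illustrative walkthrough (editor's addition, not part of the driver):
 * ignoring the lack_mem fast path (lack_mem is initialized true above,
 * which pins the order at HMM_MIN_ORDER), the block arithmetic for a
 * 22-page request that both pools failed to satisfy would be, assuming
 * HMM_MIN_ORDER = 0 and HMM_MAX_ORDER = 3:
 *
 *	pgnr = 22 -> order = min(fls(22) - 1, 3) = 3 -> 8 pages, 14 left
 *	pgnr = 14 -> order = 3                       -> 8 pages,  6 left
 *	pgnr = 6  -> order = 2                       -> 4 pages,  2 left
 *	pgnr = 2  -> order = 1                       -> 2 pages,  0 left
 *
 * Any alloc_pages() failure drops the order to HMM_MIN_ORDER for that
 * block, and two consecutive failures set lack_mem so all later blocks
 * stay at the minimum order.
 */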
static void free_private_pages(struct hmm_buffer_object *bo,
			       struct hmm_pool *dypool,
			       struct hmm_pool *repool)
{
	free_private_bo_pages(bo, dypool, repool, bo->pgnr);

	kfree(bo->page_obj);
}
/*
 * Hacked from kernel function __get_user_pages in mm/memory.c
 *
 * Handles buffers allocated by other kernel space drivers and mmaped into
 * user space; the function ignores the VM_PFNMAP and VM_IO flags in the
 * VMA structure.
 *
 * Get physical pages from a user space virtual address and update them
 * into the page list.
 */
static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
			      unsigned long start, int nr_pages,
			      unsigned int gup_flags, struct page **pages,
			      struct vm_area_struct **vmas)
{
	int i, ret;
	unsigned long vm_flags;

	if (nr_pages <= 0)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * Require read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (gup_flags & FOLL_WRITE) ?
		    (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (gup_flags & FOLL_FORCE) ?
		    (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;

		vma = find_vma(mm, start);
		if (!vma) {
			dev_err(atomisp_dev, "find_vma failed\n");
			return i ? : -EFAULT;
		}

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags);
			continue;
		}

		do {
			struct page *page;
			unsigned long pfn;

			/*
			 * If we have a pending SIGKILL, don't keep faulting
			 * pages and potentially allocating memory.
			 */
			if (unlikely(fatal_signal_pending(current))) {
				dev_err(atomisp_dev,
					"fatal_signal_pending in %s\n",
					__func__);
				return i ? i : -ERESTARTSYS;
			}

			ret = follow_pfn(vma, start, &pfn);
			if (ret) {
				dev_err(atomisp_dev, "follow_pfn() failed\n");
				return i ? : -EFAULT;
			}

			page = pfn_to_page(pfn);
			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;
				get_page(page);
				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			nr_pages--;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);

	return i;
}
static int get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, int nr_pages,
			    int write, int force,
			    struct page **pages, struct vm_area_struct **vmas)
{
	int flags = FOLL_TOUCH;

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_pfnmap_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
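/*
 * Illustrative note (editor's addition, not part of the driver): the call
 * in alloc_user_pages() below passes write = 1, force = 0 and a non-NULL
 * pages array, so gup_flags resolves to FOLL_TOUCH | FOLL_GET | FOLL_WRITE,
 * i.e. pin each page with get_page() and require write permission on the
 * VMA.
 */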
/*
 * Convert user space virtual address into pages list
 */
static int alloc_user_pages(struct hmm_buffer_object *bo,
			    void *userptr, bool cached)
{
	int page_nr;
	int i;
	struct vm_area_struct *vma;
	struct page **pages;

	pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(!pages))
		return -ENOMEM;

	bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
				     GFP_KERNEL);
	if (unlikely(!bo->page_obj)) {
		kfree(pages);
		return -ENOMEM;
	}

	mutex_unlock(&bo->mutex);
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, (unsigned long)userptr);
	up_read(&current->mm->mmap_sem);
	if (!vma) {
		dev_err(atomisp_dev, "find_vma failed\n");
		kfree(bo->page_obj);
		kfree(pages);
		mutex_lock(&bo->mutex);
		return -EFAULT;
	}
	mutex_lock(&bo->mutex);
	/*
	 * Handle frame buffers allocated in other kernel space drivers
	 * and mapped to user space
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
		page_nr = get_pfnmap_pages(current, current->mm,
					   (unsigned long)userptr,
					   (int)(bo->pgnr), 1, 0,
					   pages, NULL);
		bo->mem_type = HMM_BO_MEM_TYPE_PFN;
	} else {
		/* Handle frame buffers allocated in user space */
		mutex_unlock(&bo->mutex);
		page_nr = get_user_pages_fast((unsigned long)userptr,
					      (int)(bo->pgnr), 1, pages);
		mutex_lock(&bo->mutex);
		bo->mem_type = HMM_BO_MEM_TYPE_USER;
	}

	/* can be written by caller, not forced */
	if (page_nr != bo->pgnr) {
		dev_err(atomisp_dev,
			"get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
			bo->pgnr, page_nr);
		goto out_of_mem;
	}

	for (i = 0; i < bo->pgnr; i++) {
		bo->page_obj[i].page = pages[i];
		bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
	}
	hmm_mem_stat.usr_size += bo->pgnr;
	kfree(pages);

	return 0;

out_of_mem:
	for (i = 0; i < page_nr; i++)
		put_page(pages[i]);
	kfree(pages);
	kfree(bo->page_obj);

	return -ENOMEM;
}
static void free_user_pages(struct hmm_buffer_object *bo)
{
	int i;

	for (i = 0; i < bo->pgnr; i++)
		put_page(bo->page_obj[i].page);
	hmm_mem_stat.usr_size -= bo->pgnr;

	kfree(bo->page_obj);
}
/*
 * allocate/free physical pages for the bo.
 *
 * type indicates where the pages are from. currently we have 3 types
 * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
 *
 * from_highmem is only valid when type is HMM_BO_PRIVATE; it will
 * try to alloc memory from highmem if from_highmem is set.
 *
 * userptr is only valid when type is HMM_BO_USER; it indicates
 * the start address from the user space task.
 *
 * from_highmem and userptr will both be ignored when type is
 * HMM_BO_SHARE.
 */
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
		       enum hmm_bo_type type, int from_highmem,
		       void *userptr, bool cached)
{
	int ret = -EINVAL;

	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);

	check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	/*
	 * TO DO:
	 * add HMM_BO_USER type
	 */
	if (type == HMM_BO_PRIVATE) {
		ret = alloc_private_pages(bo, from_highmem,
					  cached, &dynamic_pool,
					  &reserved_pool);
	} else if (type == HMM_BO_USER) {
		ret = alloc_user_pages(bo, userptr, cached);
	} else {
		dev_err(atomisp_dev, "invalid buffer type.\n");
		ret = -EINVAL;
	}
	if (ret)
		goto alloc_err;

	bo->type = type;

	bo->status |= HMM_BO_PAGE_ALLOCED;

	mutex_unlock(&bo->mutex);

	return 0;

alloc_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev, "alloc pages err...\n");
	return ret;
status_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object has already page allocated.\n");
	return -EINVAL;
}
/*
 * free physical pages of the bo.
 */
void hmm_bo_free_pages(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);

	/* clear the flag anyway. */
	bo->status &= (~HMM_BO_PAGE_ALLOCED);

	if (bo->type == HMM_BO_PRIVATE)
		free_private_pages(bo, &dynamic_pool, &reserved_pool);
	else if (bo->type == HMM_BO_USER)
		free_user_pages(bo);
	else
		dev_err(atomisp_dev, "invalid buffer type.\n");
	mutex_unlock(&bo->mutex);

	return;

status_err2:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object not page allocated yet.\n");
}
int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
{
	check_bo_null_return(bo, 0);

	return bo->status & HMM_BO_PAGE_ALLOCED;
}
/*
 * get physical page info of the bo.
 */
int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
			 struct hmm_page_object **page_obj, int *pgnr)
{
	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	*page_obj = bo->page_obj;
	*pgnr = bo->pgnr;

	mutex_unlock(&bo->mutex);

	return 0;

status_err:
	dev_err(atomisp_dev,
		"buffer object not page allocated yet.\n");
	mutex_unlock(&bo->mutex);
	return -EINVAL;
}
/*
 * bind the physical pages to a virtual address space.
 */
int hmm_bo_bind(struct hmm_buffer_object *bo)
{
	int ret;
	unsigned int virt;
	struct hmm_bo_device *bdev;
	unsigned int i;

	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo,
				 HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
				 status_err1);

	check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);

	bdev = bo->bdev;

	virt = bo->start;

	for (i = 0; i < bo->pgnr; i++) {
		ret = isp_mmu_map(&bdev->mmu, virt,
				  page_to_phys(bo->page_obj[i].page), 1);
		if (ret)
			goto map_err;
		virt += (1 << PAGE_SHIFT);
	}

	/*
	 * flush TLB here.
	 *
	 * theoretically, we do not need to flush the TLB as we did not change
	 * any existing address mappings, but for Silicon Hive's MMU there is
	 * really a bug here. I guess when fetching PTEs (page table entries)
	 * into the TLB, its MMU will fetch additional INVALID PTEs
	 * automatically for performance reasons. E.g. we only set up 1 page
	 * address mapping, meaning updating 1 PTE, but the MMU fetches 4 PTEs
	 * at one time, so the additional 3 PTEs are invalid.
	 */
	if (bo->start != 0x0)
		isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
					(bo->pgnr << PAGE_SHIFT));

	bo->status |= HMM_BO_BINDED;

	mutex_unlock(&bo->mutex);

	return 0;

map_err:
	/* unbind the physical pages with related virtual address space */
	virt = bo->start;
	for ( ; i > 0; i--) {
		isp_mmu_unmap(&bdev->mmu, virt, 1);
		virt += pgnr_to_size(1);
	}

	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"setup MMU address mapping failed.\n");
	return ret;

status_err2:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev, "buffer object already bound.\n");
	return -EINVAL;
status_err1:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object vm_node or page not allocated.\n");
	return -EINVAL;
}
/*
 * unbind the physical pages with related virtual address space.
 */
void hmm_bo_unbind(struct hmm_buffer_object *bo)
{
	unsigned int virt;
	struct hmm_bo_device *bdev;
	unsigned int i;

	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo,
				 HMM_BO_PAGE_ALLOCED |
				 HMM_BO_ALLOCED |
				 HMM_BO_BINDED, status_err);

	bdev = bo->bdev;

	virt = bo->start;

	for (i = 0; i < bo->pgnr; i++) {
		isp_mmu_unmap(&bdev->mmu, virt, 1);
		virt += pgnr_to_size(1);
	}

	/*
	 * flush TLB as the address mapping has been removed and
	 * related TLBs should be invalidated.
	 */
	isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
				(bo->pgnr << PAGE_SHIFT));

	bo->status &= (~HMM_BO_BINDED);

	mutex_unlock(&bo->mutex);

	return;

status_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer vm or page not allocated or not bound yet.\n");
}
int hmm_bo_binded(struct hmm_buffer_object *bo)
{
	int ret;

	check_bo_null_return(bo, 0);

	mutex_lock(&bo->mutex);

	ret = bo->status & HMM_BO_BINDED;

	mutex_unlock(&bo->mutex);

	return ret;
}
void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
{
	struct page **pages;
	int i;

	check_bo_null_return(bo, NULL);

	mutex_lock(&bo->mutex);
	if (((bo->status & HMM_BO_VMAPED) && !cached) ||
	    ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
		mutex_unlock(&bo->mutex);
		return bo->vmap_addr;
	}

	/* cached status needs to be changed, so vunmap first */
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		vunmap(bo->vmap_addr);
		bo->vmap_addr = NULL;
		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
	}

	pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
	if (unlikely(!pages)) {
		mutex_unlock(&bo->mutex);
		return NULL;
	}

	for (i = 0; i < bo->pgnr; i++)
		pages[i] = bo->page_obj[i].page;

	bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
			     cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
	if (unlikely(!bo->vmap_addr)) {
		kfree(pages);
		mutex_unlock(&bo->mutex);
		dev_err(atomisp_dev, "vmap failed...\n");
		return NULL;
	}
	bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);

	kfree(pages);

	mutex_unlock(&bo->mutex);
	return bo->vmap_addr;
}
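/*
 * Usage sketch (editor's addition, not part of the driver): hmm_bo_vmap()
 * gives the CPU a kernel virtual view of a bo's pages. With a cached
 * mapping, the cache must be flushed before the ISP reads the data:
 *
 *	void *va = hmm_bo_vmap(bo, true);
 *
 *	if (va) {
 *		memcpy(va, fw_data, fw_size);     CPU fills the buffer
 *		hmm_bo_flush_vmap(bo);            make it visible to the ISP
 *	}
 *	...
 *	hmm_bo_vunmap(bo);
 *
 * fw_data/fw_size are hypothetical; an uncached mapping (cached = false)
 * would not need the flush.
 */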
void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);
	if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
		mutex_unlock(&bo->mutex);
		return;
	}

	clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
	mutex_unlock(&bo->mutex);
}
void hmm_bo_vunmap(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		vunmap(bo->vmap_addr);
		bo->vmap_addr = NULL;
		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
	}

	mutex_unlock(&bo->mutex);
}
void hmm_bo_ref(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	kref_get(&bo->kref);
}
static void kref_hmm_bo_release(struct kref *kref)
{
	if (!kref)
		return;

	hmm_bo_release(kref_to_hmm_bo(kref));
}
void hmm_bo_unref(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	kref_put(&bo->kref, kref_hmm_bo_release);
}
static void hmm_bo_vm_open(struct vm_area_struct *vma)
{
	struct hmm_buffer_object *bo =
	    (struct hmm_buffer_object *)vma->vm_private_data;

	check_bo_null_return_void(bo);

	hmm_bo_ref(bo);

	mutex_lock(&bo->mutex);

	bo->status |= HMM_BO_MMAPED;

	bo->mmap_count++;

	mutex_unlock(&bo->mutex);
}
static void hmm_bo_vm_close(struct vm_area_struct *vma)
{
	struct hmm_buffer_object *bo =
	    (struct hmm_buffer_object *)vma->vm_private_data;

	check_bo_null_return_void(bo);

	hmm_bo_unref(bo);

	mutex_lock(&bo->mutex);

	bo->mmap_count--;

	if (!bo->mmap_count) {
		bo->status &= (~HMM_BO_MMAPED);
		vma->vm_private_data = NULL;
	}

	mutex_unlock(&bo->mutex);
}
static const struct vm_operations_struct hmm_bo_vm_ops = {
	.open = hmm_bo_vm_open,
	.close = hmm_bo_vm_close,
};
/*
 * mmap the bo to user space.
 */
int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
{
	unsigned int start, end;
	unsigned int virt;
	unsigned int pgnr, i;
	unsigned int pfn;

	check_bo_null_return(bo, -EINVAL);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	pgnr = bo->pgnr;
	start = vma->vm_start;
	end = vma->vm_end;

	/*
	 * check vma's virtual address space size and buffer object's size.
	 * must be the same.
	 */
	if ((start + pgnr_to_size(pgnr)) != end) {
		dev_warn(atomisp_dev,
			 "vma's address space size not equal to buffer object's size");
		return -EINVAL;
	}

	virt = vma->vm_start;
	for (i = 0; i < pgnr; i++) {
		pfn = page_to_pfn(bo->page_obj[i].page);
		if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
			dev_warn(atomisp_dev,
				 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
				 virt, pfn, 1);
			return -EINVAL;
		}
		virt += PAGE_SIZE;
	}

	vma->vm_private_data = bo;

	vma->vm_ops = &hmm_bo_vm_ops;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	/*
	 * call hmm_bo_vm_open explicitly.
	 */
	hmm_bo_vm_open(vma);

	return 0;

status_err:
	dev_err(atomisp_dev, "buffer page not allocated yet.\n");
	return -EINVAL;
}