// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ion.h"
17 struct ion_chunk_heap
{
19 struct gen_pool
*pool
;
21 unsigned long chunk_size
;
23 unsigned long allocated
;
26 static int ion_chunk_heap_allocate(struct ion_heap
*heap
,
27 struct ion_buffer
*buffer
,
31 struct ion_chunk_heap
*chunk_heap
=
32 container_of(heap
, struct ion_chunk_heap
, heap
);
33 struct sg_table
*table
;
34 struct scatterlist
*sg
;
36 unsigned long num_chunks
;
37 unsigned long allocated_size
;
39 allocated_size
= ALIGN(size
, chunk_heap
->chunk_size
);
40 num_chunks
= allocated_size
/ chunk_heap
->chunk_size
;
42 if (allocated_size
> chunk_heap
->size
- chunk_heap
->allocated
)
45 table
= kmalloc(sizeof(*table
), GFP_KERNEL
);
48 ret
= sg_alloc_table(table
, num_chunks
, GFP_KERNEL
);
55 for (i
= 0; i
< num_chunks
; i
++) {
56 unsigned long paddr
= gen_pool_alloc(chunk_heap
->pool
,
57 chunk_heap
->chunk_size
);
60 sg_set_page(sg
, pfn_to_page(PFN_DOWN(paddr
)),
61 chunk_heap
->chunk_size
, 0);
65 buffer
->sg_table
= table
;
66 chunk_heap
->allocated
+= allocated_size
;
70 for (i
-= 1; i
>= 0; i
--) {
71 gen_pool_free(chunk_heap
->pool
, page_to_phys(sg_page(sg
)),
80 static void ion_chunk_heap_free(struct ion_buffer
*buffer
)
82 struct ion_heap
*heap
= buffer
->heap
;
83 struct ion_chunk_heap
*chunk_heap
=
84 container_of(heap
, struct ion_chunk_heap
, heap
);
85 struct sg_table
*table
= buffer
->sg_table
;
86 struct scatterlist
*sg
;
88 unsigned long allocated_size
;
90 allocated_size
= ALIGN(buffer
->size
, chunk_heap
->chunk_size
);
92 ion_heap_buffer_zero(buffer
);
94 for_each_sg(table
->sgl
, sg
, table
->nents
, i
) {
95 gen_pool_free(chunk_heap
->pool
, page_to_phys(sg_page(sg
)),
98 chunk_heap
->allocated
-= allocated_size
;
103 static struct ion_heap_ops chunk_heap_ops
= {
104 .allocate
= ion_chunk_heap_allocate
,
105 .free
= ion_chunk_heap_free
,
106 .map_user
= ion_heap_map_user
,
107 .map_kernel
= ion_heap_map_kernel
,
108 .unmap_kernel
= ion_heap_unmap_kernel
,
111 struct ion_heap
*ion_chunk_heap_create(struct ion_platform_heap
*heap_data
)
113 struct ion_chunk_heap
*chunk_heap
;
118 page
= pfn_to_page(PFN_DOWN(heap_data
->base
));
119 size
= heap_data
->size
;
121 ret
= ion_heap_pages_zero(page
, size
, pgprot_writecombine(PAGE_KERNEL
));
125 chunk_heap
= kzalloc(sizeof(*chunk_heap
), GFP_KERNEL
);
127 return ERR_PTR(-ENOMEM
);
129 chunk_heap
->chunk_size
= (unsigned long)heap_data
->priv
;
130 chunk_heap
->pool
= gen_pool_create(get_order(chunk_heap
->chunk_size
) +
132 if (!chunk_heap
->pool
) {
134 goto error_gen_pool_create
;
136 chunk_heap
->base
= heap_data
->base
;
137 chunk_heap
->size
= heap_data
->size
;
138 chunk_heap
->allocated
= 0;
140 gen_pool_add(chunk_heap
->pool
, chunk_heap
->base
, heap_data
->size
, -1);
141 chunk_heap
->heap
.ops
= &chunk_heap_ops
;
142 chunk_heap
->heap
.type
= ION_HEAP_TYPE_CHUNK
;
143 chunk_heap
->heap
.flags
= ION_HEAP_FLAG_DEFER_FREE
;
144 pr_debug("%s: base %pa size %zu\n", __func__
,
145 &chunk_heap
->base
, heap_data
->size
);
147 return &chunk_heap
->heap
;
149 error_gen_pool_create
: