// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"

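/*
 * struct ion_chunk_heap - carveout heap that hands out fixed-size chunks
 * @heap:	generic ion heap
 * @pool:	gen_pool managing the physically contiguous carveout
 * @base:	physical base address of the carveout
 * @chunk_size:	allocation granularity in bytes
 * @size:	total size of the carveout in bytes
 * @allocated:	bytes currently handed out to buffers
 */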
struct ion_chunk_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	phys_addr_t base;
	unsigned long chunk_size;
	unsigned long size;
	unsigned long allocated;
};

static int ion_chunk_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size,
				   unsigned long flags)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret, i;
	unsigned long num_chunks;
	unsigned long allocated_size;

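	/* Round the request up to a whole number of chunks. */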
	allocated_size = ALIGN(size, chunk_heap->chunk_size);
	num_chunks = allocated_size / chunk_heap->chunk_size;

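	/* Fail fast if the carveout cannot hold the whole request. */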
	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
		return -ENOMEM;

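	/* Build a scatterlist with one entry per chunk. */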
	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}

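	/*
	 * Carve the chunks out of the gen_pool one at a time and record
	 * each one's page and length in the scatterlist.
	 */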
	sg = table->sgl;
	for (i = 0; i < num_chunks; i++) {
		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
						     chunk_heap->chunk_size);
		if (!paddr)
			goto err;
		sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
			    chunk_heap->chunk_size, 0);
		sg = sg_next(sg);
	}

	buffer->sg_table = table;
	chunk_heap->allocated += allocated_size;
	return 0;
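
	/*
	 * Unwind a partial allocation: entries 0..i-1 hold chunks that
	 * were successfully allocated, so return those to the pool.
	 */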
err:
	sg = table->sgl;
	for (i -= 1; i >= 0; i--) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
		sg = sg_next(sg);
	}
	sg_free_table(table);
	kfree(table);
	return -ENOMEM;
}

static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	int i;
	unsigned long allocated_size;

	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);

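	/*
	 * Zero the buffer before the chunks go back to the pool so the
	 * next client never sees stale data.
	 */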
	ion_heap_buffer_zero(buffer);

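	/* Hand every chunk back to the gen_pool. */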
	for_each_sg(table->sgl, sg, table->nents, i) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
	}
	chunk_heap->allocated -= allocated_size;
	sg_free_table(table);
	kfree(table);
}

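/*
 * Only allocate and free are chunk-specific; user and kernel mappings
 * are handled by the generic ion_heap helpers.
 */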
static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

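/*
 * Create a chunk heap over the carveout described by @heap_data:
 * @heap_data->base and @heap_data->size give the physically contiguous
 * region, and @heap_data->priv carries the chunk size.
 */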
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_chunk_heap *chunk_heap;
	int ret;
	struct page *page;
	size_t size;

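	/* Zero the whole carveout up front through a write-combined mapping. */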
	page = pfn_to_page(PFN_DOWN(heap_data->base));
	size = heap_data->size;

	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
	if (ret)
		return ERR_PTR(ret);

	chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
		return ERR_PTR(-ENOMEM);

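	/*
	 * The pool's minimum allocation order is sized so that a single
	 * gen_pool_alloc() of chunk_size covers exactly one chunk.
	 */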
	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
					   PAGE_SHIFT, -1);
	if (!chunk_heap->pool) {
		ret = -ENOMEM;
		goto error_gen_pool_create;
	}
	chunk_heap->base = heap_data->base;
	chunk_heap->size = heap_data->size;
	chunk_heap->allocated = 0;

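	/*
	 * Seed the pool with the entire carveout. ION_HEAP_FLAG_DEFER_FREE
	 * makes ion free buffers from a background thread rather than in
	 * the caller's context.
	 */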
	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	pr_debug("%s: base %pa size %zu\n", __func__,
		 &chunk_heap->base, heap_data->size);

	return &chunk_heap->heap;

error_gen_pool_create:
	kfree(chunk_heap);
	return ERR_PTR(ret);
}
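
/*
 * Usage sketch (illustrative only; the field values below are made up
 * and the exact ion_platform_heap layout depends on the kernel tree):
 *
 *	struct ion_platform_heap heap_data = {
 *		.type = ION_HEAP_TYPE_CHUNK,
 *		.name = "chunk",
 *		.base = carveout_base,	// physical base of reserved RAM
 *		.size = SZ_16M,		// total carveout size
 *		.priv = (void *)SZ_64K,	// chunk size
 *	};
 *	struct ion_heap *heap = ion_chunk_heap_create(&heap_data);
 */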