// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/cma.h>
#include <linux/scatterlist.h>

#include "ion.h"

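/*
 * An ion_cma_heap wraps a single CMA (Contiguous Memory Allocator) area
 * and exposes it to ION as a heap of type ION_HEAP_TYPE_DMA.
 */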
struct ion_cma_heap {
        struct ion_heap heap;
        struct cma *cma;
};

#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)

/* ION CMA heap operations */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
                            unsigned long len,
                            unsigned long flags)
{
        struct ion_cma_heap *cma_heap = to_cma_heap(heap);
        struct sg_table *table;
        struct page *pages;
        unsigned long size = PAGE_ALIGN(len);
        unsigned long nr_pages = size >> PAGE_SHIFT;
        unsigned long align = get_order(size);
        int ret;

        if (align > CONFIG_CMA_ALIGNMENT)
                align = CONFIG_CMA_ALIGNMENT;

        /* cma_alloc() takes a number of pages, not a length in bytes */
        pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        table = kmalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                goto err;

        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret)
                goto free_mem;

        /* the allocation is physically contiguous, so one sg entry covers it */
        sg_set_page(table->sgl, pages, len, 0);

        buffer->priv_virt = pages;
        buffer->sg_table = table;
        return 0;

free_mem:
        kfree(table);
err:
        /* cma_release() also takes a page count, not a byte count */
        cma_release(cma_heap->cma, pages, nr_pages);
        return -ENOMEM;
}

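/*
 * Undo ion_cma_allocate(): give the pages back to the CMA area and
 * free the scatter/gather table that described them.
 */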
static void ion_cma_free(struct ion_buffer *buffer)
{
        struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
        struct page *pages = buffer->priv_virt;
        unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;

        /* release memory: cma_release() expects a page count */
        cma_release(cma_heap->cma, pages, nr_pages);
        /* release sg table */
        sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
}

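/*
 * Kernel and userspace mappings are handled by the generic ion_heap
 * helpers, which operate on the buffer->sg_table set up above.
 */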
static struct ion_heap_ops ion_cma_ops = {
        .allocate = ion_cma_allocate,
        .free = ion_cma_free,
        .map_user = ion_heap_map_user,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
};

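/*
 * Build an ION heap on top of one CMA area.  Returns ERR_PTR(-ENOMEM)
 * if the heap wrapper cannot be allocated.
 */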
static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
{
        struct ion_cma_heap *cma_heap;

        cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
        if (!cma_heap)
                return ERR_PTR(-ENOMEM);

        cma_heap->heap.ops = &ion_cma_ops;
        /*
         * Remember which reserved CMA area backs this heap; all
         * allocations from the heap are served from that area.
         */
        cma_heap->cma = cma;
        cma_heap->heap.type = ION_HEAP_TYPE_DMA;
        return &cma_heap->heap;
}

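/*
 * cma_for_each_area() callback: create one ION heap per CMA area and
 * register it with the ION core under the area's name.
 */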
static int __ion_add_cma_heaps(struct cma *cma, void *data)
{
        struct ion_heap *heap;

        heap = __ion_cma_heap_create(cma);
        if (IS_ERR(heap))
                return PTR_ERR(heap);

        heap->name = cma_get_name(cma);

        ion_device_add_heap(heap);
        return 0;
}

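/*
 * At device_initcall time, walk every CMA area declared on the system
 * and register each one as an ION heap.
 */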
static int ion_add_cma_heaps(void)
{
        cma_for_each_area(__ion_add_cma_heaps, NULL);
        return 0;
}
device_initcall(ion_add_cma_heaps);