/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ION Memory Allocator kernel interface header
 *
 * Copyright (C) 2011 Google, Inc.
 */

#ifndef _ION_H
#define _ION_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/miscdevice.h>

#include "../uapi/ion.h"

/**
 * struct ion_buffer - metadata for a particular buffer
 * @list:           element in list of deferred freeable buffers
 * @dev:            back pointer to the ion_device
 * @heap:           back pointer to the heap the buffer came from
 * @flags:          buffer specific flags
 * @private_flags:  internal buffer specific flags
 * @size:           size of the buffer
 * @priv_virt:      private data to the buffer representable as
 *                  a void *
 * @lock:           protects the buffer's cnt fields
 * @kmap_cnt:       number of times the buffer is mapped to the kernel
 * @vaddr:          the kernel mapping if kmap_cnt is not zero
 * @sg_table:       the sg table for the buffer
 * @attachments:    list of devices attached to this buffer
 */
struct ion_buffer {
        struct list_head list;
        struct ion_device *dev;
        struct ion_heap *heap;
        unsigned long flags;
        unsigned long private_flags;
        size_t size;
        void *priv_virt;
        struct mutex lock;
        int kmap_cnt;
        void *vaddr;
        struct sg_table *sg_table;
        struct list_head attachments;
};

void ion_buffer_destroy(struct ion_buffer *buffer);

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:            the actual misc device
 * @lock:           rwsem protecting the tree of heaps and clients
 * @heaps:          list of all the heaps added to the device
 * @debug_root:     root dentry for the device's debugfs entries
 * @heap_cnt:       number of heaps added to the device
 */
struct ion_device {
        struct miscdevice dev;
        struct rw_semaphore lock;
        struct plist_head heaps;
        struct dentry *debug_root;
        int heap_cnt;
};

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:       allocate memory
 * @free:           free memory
 * @map_kernel:     map memory into the kernel
 * @unmap_kernel:   unmap memory from the kernel
 * @map_user:       map memory into userspace
 * @shrink:         shrink heap-private caches such as page pools
 *
 * allocate and map_user return 0 on success, -errno on error.
 * map_kernel returns a pointer on success, ERR_PTR on error. @free
 * will be called with ION_PRIV_FLAG_SHRINKER_FREE set in the buffer's
 * private_flags when called from a shrinker. In that case, the pages
 * being freed must be truly freed back to the system, not put in a
 * page pool or otherwise cached.
 */
struct ion_heap_ops {
        int (*allocate)(struct ion_heap *heap,
                        struct ion_buffer *buffer, unsigned long len,
                        unsigned long flags);
        void (*free)(struct ion_buffer *buffer);
        void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
        void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
        int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
                        struct vm_area_struct *vma);
        int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
};
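
/*
 * Illustrative sketch (not part of this interface): a heap typically
 * provides a static ops table wired to its own allocate/free
 * implementations, and for buffers with a valid sg_table it can reuse
 * the generic ion_heap_map_kernel()/ion_heap_unmap_kernel()/
 * ion_heap_map_user() helpers declared later in this header.  The
 * my_heap_* names below are hypothetical:
 *
 *      static struct ion_heap_ops my_heap_ops = {
 *              .allocate = my_heap_allocate,
 *              .free = my_heap_free,
 *              .map_kernel = ion_heap_map_kernel,
 *              .unmap_kernel = ion_heap_unmap_kernel,
 *              .map_user = ion_heap_map_user,
 *      };
 */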

/**
 * heap flags - flags between the heaps and core ion code
 */
#define ION_HEAP_FLAG_DEFER_FREE        BIT(0)

/**
 * private flags - flags internal to ion
 */
/*
 * Buffer is being freed from a shrinker function. Skip any possible
 * heap-specific caching mechanism (e.g. page pools). Guarantees that
 * any buffer storage that came from the system allocator will be
 * returned to the system allocator.
 */
#define ION_PRIV_FLAG_SHRINKER_FREE     BIT(0)

/**
 * struct ion_heap - represents a heap in the system
 * @node:               plist node to put the heap on the device's list of heaps
 * @dev:                back pointer to the ion_device
 * @type:               type of heap
 * @ops:                ops struct as above
 * @flags:              flags
 * @id:                 id of heap, also indicates priority of this heap when
 *                      allocating. These are specified by platform data and
 *                      MUST be unique
 * @name:               used for debugging
 * @shrinker:           a shrinker for the heap
 * @free_list:          free list head if deferred free is used
 * @free_list_size:     size of the deferred free list in bytes
 * @free_lock:          protects the free list
 * @waitqueue:          queue to wait on from deferred free thread
 * @task:               task struct of deferred free thread
 * @num_of_buffers:     the number of currently allocated buffers
 * @num_of_alloc_bytes: the number of allocated bytes
 * @alloc_bytes_wm:     the high watermark of allocated bytes
 * @stat_lock:          protects the heap statistics
 *
 * Represents a pool of memory from which buffers can be made. In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
        struct plist_node node;
        struct ion_device *dev;
        enum ion_heap_type type;
        struct ion_heap_ops *ops;
        unsigned long flags;
        unsigned int id;
        const char *name;

        /* deferred free support */
        struct shrinker shrinker;
        struct list_head free_list;
        size_t free_list_size;
        spinlock_t free_lock;
        wait_queue_head_t waitqueue;
        struct task_struct *task;

        /* heap statistics */
        u64 num_of_buffers;
        u64 num_of_alloc_bytes;
        u64 alloc_bytes_wm;

        /* protect heap statistics */
        spinlock_t stat_lock;
};

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @heap:           the heap to add
 */
void ion_device_add_heap(struct ion_heap *heap);
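
/*
 * Illustrative sketch (assumed usage, not mandated by this header): a
 * heap driver typically fills in a struct ion_heap and registers it
 * once at init time.  The my_heap/my_heap_ops names are hypothetical:
 *
 *      static struct ion_heap my_heap = {
 *              .ops = &my_heap_ops,
 *              .type = ION_HEAP_TYPE_CUSTOM,
 *              .flags = ION_HEAP_FLAG_DEFER_FREE,
 *              .name = "my_heap",
 *      };
 *
 *      static int __init my_heap_init(void)
 *      {
 *              ion_device_add_heap(&my_heap);
 *              return 0;
 *      }
 *      device_initcall(my_heap_init);
 */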

/**
 * some helpers for common operations on buffers using the sg_table
 * and vaddr fields
 */
void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma);
int ion_heap_buffer_zero(struct ion_buffer *buffer);

/**
 * ion_heap_init_shrinker
 * @heap:           the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
 * this function will be called to set up a shrinker to shrink the freelists
 * and call the heap's shrink op.
 */
int ion_heap_init_shrinker(struct ion_heap *heap);

/**
 * ion_heap_init_deferred_free -- initialize deferred free functionality
 * @heap:           the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
 * be called to set up deferred frees. Calls to free the buffer will
 * return immediately and the actual free will occur some time later.
 */
int ion_heap_init_deferred_free(struct ion_heap *heap);
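
/*
 * Illustrative sketch (assumed wiring, following the two comments above
 * rather than any code in this header): when a heap is added, the core
 * is expected to initialize deferred freeing and/or the shrinker based
 * on the heap's flags and ops, roughly:
 *
 *      if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
 *              ion_heap_init_deferred_free(heap);
 *
 *      if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
 *              ion_heap_init_shrinker(heap);
 */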

/**
 * ion_heap_freelist_add - add a buffer to the deferred free list
 * @heap:           the heap
 * @buffer:         the buffer
 *
 * Adds an item to the deferred freelist.
 */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_freelist_drain - drain the deferred free list
 * @heap:           the heap
 * @size:           amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately. Returns the total amount freed. The total freed may be
 * higher depending on the size of the items in the list, or lower if
 * there is insufficient total memory on the freelist.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_shrink - drain the deferred free list, skipping any
 *                            heap-specific pooling or caching mechanisms
 * @heap:           the heap
 * @size:           amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately. Returns the total amount freed. The total freed may be
 * higher depending on the size of the items in the list, or lower if
 * there is insufficient total memory on the freelist.
 *
 * Unlike with @ion_heap_freelist_drain, don't put any pages back into
 * page pools or otherwise cache the pages. Everything must be
 * genuinely freed back to the system. If you're freeing from a
 * shrinker you probably want to use this. Note that this relies on
 * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
 * flag.
 */
size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_size - returns the size of the freelist in bytes
 * @heap:           the heap
 */
size_t ion_heap_freelist_size(struct ion_heap *heap);
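
/*
 * Illustrative sketch (assumed, not defined in this header): under
 * memory pressure a heap shrinker would typically drain the deferred
 * freelist first, bypassing page pools, and only then ask the heap's
 * own shrink op for more.  sc is a struct shrink_control from the
 * core shrinker infrastructure:
 *
 *      freed = ion_heap_freelist_shrink(heap,
 *                                       sc->nr_to_scan * PAGE_SIZE) / PAGE_SIZE;
 *      if (freed < sc->nr_to_scan && heap->ops->shrink)
 *              freed += heap->ops->shrink(heap, sc->gfp_mask,
 *                                         sc->nr_to_scan - freed);
 */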

/**
 * functions for creating and destroying a heap pool -- allows you
 * to keep a pool of pre allocated memory to use from your heap. Keeping
 * a pool of memory that is ready for dma, i.e. any cached mappings have
 * been invalidated from the cache, provides a significant performance
 * benefit on many systems
 */

/**
 * struct ion_page_pool - pagepool struct
 * @high_count:     number of highmem items in the pool
 * @low_count:      number of lowmem items in the pool
 * @high_items:     list of highmem items
 * @low_items:      list of lowmem items
 * @mutex:          lock protecting this struct and especially the count
 *                  item list
 * @gfp_mask:       gfp_mask to use from alloc
 * @order:          order of pages in the pool
 * @list:           plist node for list of pools
 *
 * Allows you to keep a pool of pre allocated pages to use from your heap.
 * Keeping a pool of pages that is ready for dma, i.e. any cached mappings
 * have been invalidated from the cache, provides a significant performance
 * benefit on many systems.
 */
struct ion_page_pool {
        int high_count;
        int low_count;
        struct list_head high_items;
        struct list_head low_items;
        struct mutex mutex;
        gfp_t gfp_mask;
        unsigned int order;
        struct plist_node list;
};

struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
void ion_page_pool_destroy(struct ion_page_pool *pool);
struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);

/**
 * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
 * @pool:           the pool
 * @gfp_mask:       the memory type to reclaim
 * @nr_to_scan:     number of items to shrink in pages
 *
 * returns the number of items freed in pages
 */
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
                         int nr_to_scan);
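
/*
 * Illustrative sketch (assumed usage, not defined here): a heap that
 * keeps a pool of order-0 pages might create it at init time, satisfy
 * allocations from it, return pages to it on free, and release it on
 * teardown.  The pool and page variables below are hypothetical:
 *
 *      struct ion_page_pool *pool;
 *      struct page *page;
 *
 *      pool = ion_page_pool_create(GFP_KERNEL | __GFP_ZERO, 0);
 *      page = ion_page_pool_alloc(pool);
 *      ...
 *      ion_page_pool_free(pool, page);
 *      ion_page_pool_destroy(pool);
 */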

#endif /* _ION_H */