1 /*
2 *
3 * drivers/staging/android/ion/ion.c
4 *
5 * Copyright (C) 2011 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18 #include <linux/device.h>
19 #include <linux/err.h>
20 #include <linux/file.h>
21 #include <linux/freezer.h>
22 #include <linux/fs.h>
23 #include <linux/anon_inodes.h>
24 #include <linux/kthread.h>
25 #include <linux/list.h>
26 #include <linux/memblock.h>
27 #include <linux/miscdevice.h>
28 #include <linux/export.h>
29 #include <linux/mm.h>
30 #include <linux/mm_types.h>
31 #include <linux/rbtree.h>
32 #include <linux/slab.h>
33 #include <linux/seq_file.h>
34 #include <linux/uaccess.h>
35 #include <linux/vmalloc.h>
36 #include <linux/debugfs.h>
37 #include <linux/dma-buf.h>
38 #include <linux/idr.h>
39
40 #include "ion.h"
41 #include "ion_priv.h"
42 #include "compat_ion.h"
43
44 /**
45 * struct ion_device - the metadata of the ion device node
46 * @dev: the actual misc device
47 * @buffers: an rb tree of all the existing buffers
48 * @buffer_lock: lock protecting the tree of buffers
49 * @lock: rwsem protecting the tree of heaps and clients
50 * @heaps: list of all the heaps in the system
51 * @clients: an rb tree of all the clients created against this device
52 */
53 struct ion_device {
54 struct miscdevice dev;
55 struct rb_root buffers;
56 struct mutex buffer_lock;
57 struct rw_semaphore lock;
58 struct plist_head heaps;
59 long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
60 unsigned long arg);
61 struct rb_root clients;
62 struct dentry *debug_root;
63 struct dentry *heaps_debug_root;
64 struct dentry *clients_debug_root;
65 };
66
67 /**
68 * struct ion_client - a process/hw block local address space
69 * @node: node in the tree of all clients
70 * @dev: backpointer to ion device
71 * @handles: an rb tree of all the handles in this client
72 * @idr: an idr space for allocating handle ids
73 * @lock: lock protecting the tree of handles
74 * @name: used for debugging
75 * @display_name: used for debugging (unique version of @name)
76 * @display_serial: used for debugging (to make display_name unique)
77 * @task: used for debugging
78 *
79 * A client represents a list of buffers this client may access.
80 * The mutex stored here is used to protect both the tree of handles
81 * and the handles themselves, and should be held while modifying either.
82 */
83 struct ion_client {
84 struct rb_node node;
85 struct ion_device *dev;
86 struct rb_root handles;
87 struct idr idr;
88 struct mutex lock;
89 const char *name;
90 char *display_name;
91 int display_serial;
92 struct task_struct *task;
93 pid_t pid;
94 struct dentry *debug_root;
95 };
96
97 /**
98 * ion_handle - a client local reference to a buffer
99 * @ref: reference count
100 * @client: back pointer to the client the buffer resides in
101 * @buffer: pointer to the buffer
102 * @node: node in the client's handle rbtree
103 * @kmap_cnt: count of times this client has mapped to kernel
104 * @id: client-unique id allocated by client->idr
105 *
106 * Modifications to node and kmap_cnt should be protected by the
107 * lock in the client. Other fields are never changed after initialization.
108 */
109 struct ion_handle {
110 struct kref ref;
111 struct ion_client *client;
112 struct ion_buffer *buffer;
113 struct rb_node node;
114 unsigned int kmap_cnt;
115 int id;
116 };
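/*
 * Relationship between the structures above: an ion_device keeps an rbtree
 * of clients and an rbtree of buffers; each ion_client keeps an rbtree of
 * handles plus an idr of handle ids; each ion_handle references exactly one
 * ion_buffer.  Lock ordering as used below: client->lock is taken before
 * buffer->lock, dev->buffer_lock protects dev->buffers, and the dev->lock
 * rwsem protects the heap and client trees.
 */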
117
118 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
119 {
120 return (buffer->flags & ION_FLAG_CACHED) &&
121 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
122 }
123
124 bool ion_buffer_cached(struct ion_buffer *buffer)
125 {
126 return !!(buffer->flags & ION_FLAG_CACHED);
127 }
128
129 static inline struct page *ion_buffer_page(struct page *page)
130 {
131 return (struct page *)((unsigned long)page & ~(1UL));
132 }
133
134 static inline bool ion_buffer_page_is_dirty(struct page *page)
135 {
136 return !!((unsigned long)page & 1UL);
137 }
138
139 static inline void ion_buffer_page_dirty(struct page **page)
140 {
141 *page = (struct page *)((unsigned long)(*page) | 1UL);
142 }
143
144 static inline void ion_buffer_page_clean(struct page **page)
145 {
146 *page = (struct page *)((unsigned long)(*page) & ~(1UL));
147 }
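/*
 * The four helpers above encode a per-page "dirty" flag in bit 0 of the
 * page pointers stored in buffer->pages (struct page pointers are at least
 * word aligned, so bit 0 is always free).  The flag is set from the fault
 * handler in ion_vm_fault() and checked and cleared again in
 * ion_buffer_sync_for_device().
 */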
148
149 /* this function should only be called while dev->buffer_lock is held */
150 static void ion_buffer_add(struct ion_device *dev,
151 struct ion_buffer *buffer)
152 {
153 struct rb_node **p = &dev->buffers.rb_node;
154 struct rb_node *parent = NULL;
155 struct ion_buffer *entry;
156
157 while (*p) {
158 parent = *p;
159 entry = rb_entry(parent, struct ion_buffer, node);
160
161 if (buffer < entry) {
162 p = &(*p)->rb_left;
163 } else if (buffer > entry) {
164 p = &(*p)->rb_right;
165 } else {
166 pr_err("%s: buffer already found.\n", __func__);
167 BUG();
168 }
169 }
170
171 rb_link_node(&buffer->node, parent, p);
172 rb_insert_color(&buffer->node, &dev->buffers);
173 }
174
175 /* this function should only be called while dev->lock is held */
176 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
177 struct ion_device *dev,
178 unsigned long len,
179 unsigned long align,
180 unsigned long flags)
181 {
182 struct ion_buffer *buffer;
183 struct sg_table *table;
184 struct scatterlist *sg;
185 int i, ret;
186
187 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
188 if (!buffer)
189 return ERR_PTR(-ENOMEM);
190
191 buffer->heap = heap;
192 buffer->flags = flags;
193 kref_init(&buffer->ref);
194
195 ret = heap->ops->allocate(heap, buffer, len, align, flags);
196
197 if (ret) {
198 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
199 goto err2;
200
201 ion_heap_freelist_drain(heap, 0);
202 ret = heap->ops->allocate(heap, buffer, len, align,
203 flags);
204 if (ret)
205 goto err2;
206 }
207
208 buffer->dev = dev;
209 buffer->size = len;
210
211 table = heap->ops->map_dma(heap, buffer);
212 if (WARN_ONCE(table == NULL,
213 "heap->ops->map_dma should return ERR_PTR on error"))
214 table = ERR_PTR(-EINVAL);
215 if (IS_ERR(table)) {
216 ret = -EINVAL;
217 goto err1;
218 }
219
220 buffer->sg_table = table;
221 if (ion_buffer_fault_user_mappings(buffer)) {
222 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
223 struct scatterlist *sg;
224 int i, j, k = 0;
225
226 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
227 if (!buffer->pages) {
228 ret = -ENOMEM;
229 goto err;
230 }
231
232 for_each_sg(table->sgl, sg, table->nents, i) {
233 struct page *page = sg_page(sg);
234
235 for (j = 0; j < sg->length / PAGE_SIZE; j++)
236 buffer->pages[k++] = page++;
237 }
238 }
239
240 buffer->dev = dev;
241 buffer->size = len;
242 INIT_LIST_HEAD(&buffer->vmas);
243 mutex_init(&buffer->lock);
244 /*
245 * this will set up dma addresses for the sglist -- it is not
246 * technically correct as per the dma api -- a specific
247 * device isn't really taking ownership here. However, in practice on
248 * our systems the only dma_address space is physical addresses.
249 * Additionally, we can't afford the overhead of invalidating every
250 * allocation via dma_map_sg. The implicit contract here is that
251 * memory coming from the heaps is ready for dma, i.e. if it has a
252 * cached mapping, that mapping has been invalidated.
253 */
254 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
255 sg_dma_address(sg) = sg_phys(sg);
256 sg_dma_len(sg) = sg->length;
257 }
258 mutex_lock(&dev->buffer_lock);
259 ion_buffer_add(dev, buffer);
260 mutex_unlock(&dev->buffer_lock);
261 return buffer;
262
263 err:
264 heap->ops->unmap_dma(heap, buffer);
265 err1:
266 heap->ops->free(buffer);
267 err2:
268 kfree(buffer);
269 return ERR_PTR(ret);
270 }
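/*
 * A buffer returned by ion_buffer_create() starts with a reference count of
 * one and has been inserted into dev->buffers.  ion_alloc() below drops that
 * creation reference once ion_handle_create() has taken its own, so the
 * buffer's lifetime is then tied to its handles and any exported dma-bufs.
 */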
271
272 void ion_buffer_destroy(struct ion_buffer *buffer)
273 {
274 if (WARN_ON(buffer->kmap_cnt > 0))
275 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
276 buffer->heap->ops->unmap_dma(buffer->heap, buffer);
277 buffer->heap->ops->free(buffer);
278 vfree(buffer->pages);
279 kfree(buffer);
280 }
281
282 static void _ion_buffer_destroy(struct kref *kref)
283 {
284 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
285 struct ion_heap *heap = buffer->heap;
286 struct ion_device *dev = buffer->dev;
287
288 mutex_lock(&dev->buffer_lock);
289 rb_erase(&buffer->node, &dev->buffers);
290 mutex_unlock(&dev->buffer_lock);
291
292 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
293 ion_heap_freelist_add(heap, buffer);
294 else
295 ion_buffer_destroy(buffer);
296 }
297
298 static void ion_buffer_get(struct ion_buffer *buffer)
299 {
300 kref_get(&buffer->ref);
301 }
302
303 static int ion_buffer_put(struct ion_buffer *buffer)
304 {
305 return kref_put(&buffer->ref, _ion_buffer_destroy);
306 }
307
308 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
309 {
310 mutex_lock(&buffer->lock);
311 buffer->handle_count++;
312 mutex_unlock(&buffer->lock);
313 }
314
315 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
316 {
317 /*
318 * when a buffer is removed from a handle, if it is not in
319 * any other handles, copy the taskcomm and the pid of the
320 * process it's being removed from into the buffer. At this
321 * point there will be no way to track what processes this buffer is
322 * being used by; it only exists as a dma_buf file descriptor.
323 * The taskcomm and pid can provide a debug hint as to where this fd
324 * is in the system.
325 */
326 mutex_lock(&buffer->lock);
327 buffer->handle_count--;
328 BUG_ON(buffer->handle_count < 0);
329 if (!buffer->handle_count) {
330 struct task_struct *task;
331
332 task = current->group_leader;
333 get_task_comm(buffer->task_comm, task);
334 buffer->pid = task_pid_nr(task);
335 }
336 mutex_unlock(&buffer->lock);
337 }
338
339 static struct ion_handle *ion_handle_create(struct ion_client *client,
340 struct ion_buffer *buffer)
341 {
342 struct ion_handle *handle;
343
344 handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
345 if (!handle)
346 return ERR_PTR(-ENOMEM);
347 kref_init(&handle->ref);
348 RB_CLEAR_NODE(&handle->node);
349 handle->client = client;
350 ion_buffer_get(buffer);
351 ion_buffer_add_to_handle(buffer);
352 handle->buffer = buffer;
353
354 return handle;
355 }
356
357 static void ion_handle_kmap_put(struct ion_handle *);
358
359 static void ion_handle_destroy(struct kref *kref)
360 {
361 struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
362 struct ion_client *client = handle->client;
363 struct ion_buffer *buffer = handle->buffer;
364
365 mutex_lock(&buffer->lock);
366 while (handle->kmap_cnt)
367 ion_handle_kmap_put(handle);
368 mutex_unlock(&buffer->lock);
369
370 idr_remove(&client->idr, handle->id);
371 if (!RB_EMPTY_NODE(&handle->node))
372 rb_erase(&handle->node, &client->handles);
373
374 ion_buffer_remove_from_handle(buffer);
375 ion_buffer_put(buffer);
376
377 kfree(handle);
378 }
379
380 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
381 {
382 return handle->buffer;
383 }
384
385 static void ion_handle_get(struct ion_handle *handle)
386 {
387 kref_get(&handle->ref);
388 }
389
390 static int ion_handle_put(struct ion_handle *handle)
391 {
392 struct ion_client *client = handle->client;
393 int ret;
394
395 mutex_lock(&client->lock);
396 ret = kref_put(&handle->ref, ion_handle_destroy);
397 mutex_unlock(&client->lock);
398
399 return ret;
400 }
401
402 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
403 struct ion_buffer *buffer)
404 {
405 struct rb_node *n = client->handles.rb_node;
406
407 while (n) {
408 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
409
410 if (buffer < entry->buffer)
411 n = n->rb_left;
412 else if (buffer > entry->buffer)
413 n = n->rb_right;
414 else
415 return entry;
416 }
417 return ERR_PTR(-EINVAL);
418 }
419
420 static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
421 int id)
422 {
423 struct ion_handle *handle;
424
425 mutex_lock(&client->lock);
426 handle = idr_find(&client->idr, id);
427 if (handle)
428 ion_handle_get(handle);
429 mutex_unlock(&client->lock);
430
431 return handle ? handle : ERR_PTR(-EINVAL);
432 }
433
434 static bool ion_handle_validate(struct ion_client *client,
435 struct ion_handle *handle)
436 {
437 WARN_ON(!mutex_is_locked(&client->lock));
438 return idr_find(&client->idr, handle->id) == handle;
439 }
440
441 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
442 {
443 int id;
444 struct rb_node **p = &client->handles.rb_node;
445 struct rb_node *parent = NULL;
446 struct ion_handle *entry;
447
448 id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
449 if (id < 0)
450 return id;
451
452 handle->id = id;
453
454 while (*p) {
455 parent = *p;
456 entry = rb_entry(parent, struct ion_handle, node);
457
458 if (handle->buffer < entry->buffer)
459 p = &(*p)->rb_left;
460 else if (handle->buffer > entry->buffer)
461 p = &(*p)->rb_right;
462 else
463 WARN(1, "%s: buffer already found.\n", __func__);
464 }
465
466 rb_link_node(&handle->node, parent, p);
467 rb_insert_color(&handle->node, &client->handles);
468
469 return 0;
470 }
471
472 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
473 size_t align, unsigned int heap_id_mask,
474 unsigned int flags)
475 {
476 struct ion_handle *handle;
477 struct ion_device *dev = client->dev;
478 struct ion_buffer *buffer = NULL;
479 struct ion_heap *heap;
480 int ret;
481
482 pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
483 len, align, heap_id_mask, flags);
484 /*
485 * traverse the list of heaps available in this system in priority
486 * order. If the heap type is supported by the client and matches the
487 * request of the caller, allocate from it. Repeat until allocation
488 * succeeds or all heaps have been tried.
489 */
490 len = PAGE_ALIGN(len);
491
492 if (!len)
493 return ERR_PTR(-EINVAL);
494
495 down_read(&dev->lock);
496 plist_for_each_entry(heap, &dev->heaps, node) {
497 /* if the caller didn't specify this heap id */
498 if (!((1 << heap->id) & heap_id_mask))
499 continue;
500 buffer = ion_buffer_create(heap, dev, len, align, flags);
501 if (!IS_ERR(buffer))
502 break;
503 }
504 up_read(&dev->lock);
505
506 if (buffer == NULL)
507 return ERR_PTR(-ENODEV);
508
509 if (IS_ERR(buffer))
510 return ERR_CAST(buffer);
511
512 handle = ion_handle_create(client, buffer);
513
514 /*
515 * ion_buffer_create will create a buffer with a ref_cnt of 1,
516 * and ion_handle_create will take a second reference, drop one here
517 */
518 ion_buffer_put(buffer);
519
520 if (IS_ERR(handle))
521 return handle;
522
523 mutex_lock(&client->lock);
524 ret = ion_handle_add(client, handle);
525 mutex_unlock(&client->lock);
526 if (ret) {
527 ion_handle_put(handle);
528 handle = ERR_PTR(ret);
529 }
530
531 return handle;
532 }
533 EXPORT_SYMBOL(ion_alloc);
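/*
 * Typical in-kernel usage of the allocation API is sketched below; this is
 * illustrative only, and "idev", "MY_HEAP_ID" and the chosen size/flags are
 * placeholders rather than symbols defined by this driver:
 *
 *	struct ion_client *client = ion_client_create(idev, "my-driver");
 *	struct ion_handle *handle = ion_alloc(client, SZ_64K, 0,
 *					      1 << MY_HEAP_ID,
 *					      ION_FLAG_CACHED);
 *	void *vaddr = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */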
534
535 void ion_free(struct ion_client *client, struct ion_handle *handle)
536 {
537 bool valid_handle;
538
539 BUG_ON(client != handle->client);
540
541 mutex_lock(&client->lock);
542 valid_handle = ion_handle_validate(client, handle);
543
544 if (!valid_handle) {
545 WARN(1, "%s: invalid handle passed to free.\n", __func__);
546 mutex_unlock(&client->lock);
547 return;
548 }
549 mutex_unlock(&client->lock);
550 ion_handle_put(handle);
551 }
552 EXPORT_SYMBOL(ion_free);
553
554 int ion_phys(struct ion_client *client, struct ion_handle *handle,
555 ion_phys_addr_t *addr, size_t *len)
556 {
557 struct ion_buffer *buffer;
558 int ret;
559
560 mutex_lock(&client->lock);
561 if (!ion_handle_validate(client, handle)) {
562 mutex_unlock(&client->lock);
563 return -EINVAL;
564 }
565
566 buffer = handle->buffer;
567
568 if (!buffer->heap->ops->phys) {
569 pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
570 __func__, buffer->heap->name, buffer->heap->type);
571 mutex_unlock(&client->lock);
572 return -ENODEV;
573 }
574 mutex_unlock(&client->lock);
575 ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
576 return ret;
577 }
578 EXPORT_SYMBOL(ion_phys);
579
580 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
581 {
582 void *vaddr;
583
584 if (buffer->kmap_cnt) {
585 buffer->kmap_cnt++;
586 return buffer->vaddr;
587 }
588 vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
589 if (WARN_ONCE(vaddr == NULL,
590 "heap->ops->map_kernel should return ERR_PTR on error"))
591 return ERR_PTR(-EINVAL);
592 if (IS_ERR(vaddr))
593 return vaddr;
594 buffer->vaddr = vaddr;
595 buffer->kmap_cnt++;
596 return vaddr;
597 }
598
599 static void *ion_handle_kmap_get(struct ion_handle *handle)
600 {
601 struct ion_buffer *buffer = handle->buffer;
602 void *vaddr;
603
604 if (handle->kmap_cnt) {
605 handle->kmap_cnt++;
606 return buffer->vaddr;
607 }
608 vaddr = ion_buffer_kmap_get(buffer);
609 if (IS_ERR(vaddr))
610 return vaddr;
611 handle->kmap_cnt++;
612 return vaddr;
613 }
614
615 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
616 {
617 buffer->kmap_cnt--;
618 if (!buffer->kmap_cnt) {
619 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
620 buffer->vaddr = NULL;
621 }
622 }
623
624 static void ion_handle_kmap_put(struct ion_handle *handle)
625 {
626 struct ion_buffer *buffer = handle->buffer;
627
628 if (!handle->kmap_cnt) {
629 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
630 return;
631 }
632 handle->kmap_cnt--;
633 if (!handle->kmap_cnt)
634 ion_buffer_kmap_put(buffer);
635 }
636
637 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
638 {
639 struct ion_buffer *buffer;
640 void *vaddr;
641
642 mutex_lock(&client->lock);
643 if (!ion_handle_validate(client, handle)) {
644 pr_err("%s: invalid handle passed to map_kernel.\n",
645 __func__);
646 mutex_unlock(&client->lock);
647 return ERR_PTR(-EINVAL);
648 }
649
650 buffer = handle->buffer;
651
652 if (!handle->buffer->heap->ops->map_kernel) {
653 pr_err("%s: map_kernel is not implemented by this heap.\n",
654 __func__);
655 mutex_unlock(&client->lock);
656 return ERR_PTR(-ENODEV);
657 }
658
659 mutex_lock(&buffer->lock);
660 vaddr = ion_handle_kmap_get(handle);
661 mutex_unlock(&buffer->lock);
662 mutex_unlock(&client->lock);
663 return vaddr;
664 }
665 EXPORT_SYMBOL(ion_map_kernel);
666
667 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
668 {
669 struct ion_buffer *buffer;
670
671 mutex_lock(&client->lock);
672 buffer = handle->buffer;
673 mutex_lock(&buffer->lock);
674 ion_handle_kmap_put(handle);
675 mutex_unlock(&buffer->lock);
676 mutex_unlock(&client->lock);
677 }
678 EXPORT_SYMBOL(ion_unmap_kernel);
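/*
 * Kernel mappings are reference counted at two levels: handle->kmap_cnt
 * tracks how many times this client has mapped the buffer, while
 * buffer->kmap_cnt tracks the total across all users.  The heap's
 * map_kernel()/unmap_kernel() ops are only called on the 0 <-> 1
 * transitions of buffer->kmap_cnt, with buffer->lock held.
 */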
679
680 static struct mutex debugfs_mutex;
681 static struct rb_root *ion_root_client;
682 static int is_client_alive(struct ion_client *client)
683 {
684 struct rb_node *node;
685 struct ion_client *tmp;
686 struct ion_device *dev;
687
688 node = ion_root_client->rb_node;
689 dev = container_of(ion_root_client, struct ion_device, clients);
690
691 down_read(&dev->lock);
692 while (node) {
693 tmp = rb_entry(node, struct ion_client, node);
694 if (client < tmp) {
695 node = node->rb_left;
696 } else if (client > tmp) {
697 node = node->rb_right;
698 } else {
699 up_read(&dev->lock);
700 return 1;
701 }
702 }
703
704 up_read(&dev->lock);
705 return 0;
706 }
707
708 static int ion_debug_client_show(struct seq_file *s, void *unused)
709 {
710 struct ion_client *client = s->private;
711 struct rb_node *n;
712 size_t sizes[ION_NUM_HEAP_IDS] = {0};
713 const char *names[ION_NUM_HEAP_IDS] = {NULL};
714 int i;
715
716 mutex_lock(&debugfs_mutex);
717 if (!is_client_alive(client)) {
718 seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
719 client);
720 mutex_unlock(&debugfs_mutex);
721 return 0;
722 }
723
724 mutex_lock(&client->lock);
725 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
726 struct ion_handle *handle = rb_entry(n, struct ion_handle,
727 node);
728 unsigned int id = handle->buffer->heap->id;
729
730 if (!names[id])
731 names[id] = handle->buffer->heap->name;
732 sizes[id] += handle->buffer->size;
733 }
734 mutex_unlock(&client->lock);
735 mutex_unlock(&debugfs_mutex);
736
737 seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
738 for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
739 if (!names[i])
740 continue;
741 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
742 }
743 return 0;
744 }
745
746 static int ion_debug_client_open(struct inode *inode, struct file *file)
747 {
748 return single_open(file, ion_debug_client_show, inode->i_private);
749 }
750
751 static const struct file_operations debug_client_fops = {
752 .open = ion_debug_client_open,
753 .read = seq_read,
754 .llseek = seq_lseek,
755 .release = single_release,
756 };
757
758 static int ion_get_client_serial(const struct rb_root *root,
759 const unsigned char *name)
760 {
761 int serial = -1;
762 struct rb_node *node;
763
764 for (node = rb_first(root); node; node = rb_next(node)) {
765 struct ion_client *client = rb_entry(node, struct ion_client,
766 node);
767
768 if (strcmp(client->name, name))
769 continue;
770 serial = max(serial, client->display_serial);
771 }
772 return serial + 1;
773 }
774
775 struct ion_client *ion_client_create(struct ion_device *dev,
776 const char *name)
777 {
778 struct ion_client *client;
779 struct task_struct *task;
780 struct rb_node **p;
781 struct rb_node *parent = NULL;
782 struct ion_client *entry;
783 pid_t pid;
784
785 if (!name) {
786 pr_err("%s: Name cannot be null\n", __func__);
787 return ERR_PTR(-EINVAL);
788 }
789
790 get_task_struct(current->group_leader);
791 task_lock(current->group_leader);
792 pid = task_pid_nr(current->group_leader);
793 /*
794 * don't bother to store the task struct for kernel threads;
795 * they can't be killed anyway
796 */
797 if (current->group_leader->flags & PF_KTHREAD) {
798 put_task_struct(current->group_leader);
799 task = NULL;
800 } else {
801 task = current->group_leader;
802 }
803 task_unlock(current->group_leader);
804
805 client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
806 if (!client)
807 goto err_put_task_struct;
808
809 client->dev = dev;
810 client->handles = RB_ROOT;
811 idr_init(&client->idr);
812 mutex_init(&client->lock);
813 client->task = task;
814 client->pid = pid;
815 client->name = kstrdup(name, GFP_KERNEL);
816 if (!client->name)
817 goto err_free_client;
818
819 down_write(&dev->lock);
820 client->display_serial = ion_get_client_serial(&dev->clients, name);
821 client->display_name = kasprintf(
822 GFP_KERNEL, "%s-%d", name, client->display_serial);
823 if (!client->display_name) {
824 up_write(&dev->lock);
825 goto err_free_client_name;
826 }
827 p = &dev->clients.rb_node;
828 while (*p) {
829 parent = *p;
830 entry = rb_entry(parent, struct ion_client, node);
831
832 if (client < entry)
833 p = &(*p)->rb_left;
834 else if (client > entry)
835 p = &(*p)->rb_right;
836 }
837 rb_link_node(&client->node, parent, p);
838 rb_insert_color(&client->node, &dev->clients);
839
840 client->debug_root = debugfs_create_file(client->display_name, 0664,
841 dev->clients_debug_root,
842 client, &debug_client_fops);
843 if (!client->debug_root) {
844 char buf[256], *path;
845
846 path = dentry_path(dev->clients_debug_root, buf, 256);
847 pr_err("Failed to create client debugfs at %s/%s\n",
848 path, client->display_name);
849 }
850
851 up_write(&dev->lock);
852
853 return client;
854
855 err_free_client_name:
856 kfree(client->name);
857 err_free_client:
858 kfree(client);
859 err_put_task_struct:
860 if (task)
861 put_task_struct(current->group_leader);
862 return ERR_PTR(-ENOMEM);
863 }
864 EXPORT_SYMBOL(ion_client_create);
865
866 void ion_client_destroy(struct ion_client *client)
867 {
868 struct ion_device *dev = client->dev;
869 struct rb_node *n;
870
871 pr_debug("%s: %d\n", __func__, __LINE__);
872 mutex_lock(&debugfs_mutex);
873 while ((n = rb_first(&client->handles))) {
874 struct ion_handle *handle = rb_entry(n, struct ion_handle,
875 node);
876 ion_handle_destroy(&handle->ref);
877 }
878
879 idr_destroy(&client->idr);
880
881 down_write(&dev->lock);
882 if (client->task)
883 put_task_struct(client->task);
884 rb_erase(&client->node, &dev->clients);
885 debugfs_remove_recursive(client->debug_root);
886 up_write(&dev->lock);
887
888 kfree(client->display_name);
889 kfree(client->name);
890 kfree(client);
891 mutex_unlock(&debugfs_mutex);
892 }
893 EXPORT_SYMBOL(ion_client_destroy);
894
895 struct sg_table *ion_sg_table(struct ion_client *client,
896 struct ion_handle *handle)
897 {
898 struct ion_buffer *buffer;
899 struct sg_table *table;
900
901 mutex_lock(&client->lock);
902 if (!ion_handle_validate(client, handle)) {
903 pr_err("%s: invalid handle passed to map_dma.\n",
904 __func__);
905 mutex_unlock(&client->lock);
906 return ERR_PTR(-EINVAL);
907 }
908 buffer = handle->buffer;
909 table = buffer->sg_table;
910 mutex_unlock(&client->lock);
911 return table;
912 }
913 EXPORT_SYMBOL(ion_sg_table);
914
915 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
916 struct device *dev,
917 enum dma_data_direction direction);
918
919 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
920 enum dma_data_direction direction)
921 {
922 struct dma_buf *dmabuf = attachment->dmabuf;
923 struct ion_buffer *buffer = dmabuf->priv;
924
925 ion_buffer_sync_for_device(buffer, attachment->dev, direction);
926 return buffer->sg_table;
927 }
928
929 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
930 struct sg_table *table,
931 enum dma_data_direction direction)
932 {
933 }
934
935 void ion_pages_sync_for_device(struct device *dev, struct page *page,
936 size_t size, enum dma_data_direction dir)
937 {
938 struct scatterlist sg;
939
940 sg_init_table(&sg, 1);
941 sg_set_page(&sg, page, size, 0);
942 /*
943 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
944 * for the targeted device, but this works on the currently targeted
945 * hardware.
946 */
947 sg_dma_address(&sg) = page_to_phys(page);
948 dma_sync_sg_for_device(dev, &sg, 1, dir);
949 }
950
951 struct ion_vma_list {
952 struct list_head list;
953 struct vm_area_struct *vma;
954 };
955
956 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
957 struct device *dev,
958 enum dma_data_direction dir)
959 {
960 struct ion_vma_list *vma_list;
961 int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
962 int i;
963
964 pr_debug("%s: syncing for device %s\n", __func__,
965 dev ? dev_name(dev) : "null");
966
967 if (!ion_buffer_fault_user_mappings(buffer))
968 return;
969
970 mutex_lock(&buffer->lock);
971 for (i = 0; i < pages; i++) {
972 struct page *page = buffer->pages[i];
973
974 if (ion_buffer_page_is_dirty(page))
975 ion_pages_sync_for_device(dev, ion_buffer_page(page),
976 PAGE_SIZE, dir);
977
978 ion_buffer_page_clean(buffer->pages + i);
979 }
980 list_for_each_entry(vma_list, &buffer->vmas, list) {
981 struct vm_area_struct *vma = vma_list->vma;
982
983 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
984 NULL);
985 }
986 mutex_unlock(&buffer->lock);
987 }
988
989 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
990 {
991 struct ion_buffer *buffer = vma->vm_private_data;
992 unsigned long pfn;
993 int ret;
994
995 mutex_lock(&buffer->lock);
996 ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
997 BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
998
999 pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1000 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1001 mutex_unlock(&buffer->lock);
1002 if (ret)
1003 return VM_FAULT_ERROR;
1004
1005 return VM_FAULT_NOPAGE;
1006 }
1007
1008 static void ion_vm_open(struct vm_area_struct *vma)
1009 {
1010 struct ion_buffer *buffer = vma->vm_private_data;
1011 struct ion_vma_list *vma_list;
1012
1013 vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1014 if (!vma_list)
1015 return;
1016 vma_list->vma = vma;
1017 mutex_lock(&buffer->lock);
1018 list_add(&vma_list->list, &buffer->vmas);
1019 mutex_unlock(&buffer->lock);
1020 pr_debug("%s: adding %p\n", __func__, vma);
1021 }
1022
1023 static void ion_vm_close(struct vm_area_struct *vma)
1024 {
1025 struct ion_buffer *buffer = vma->vm_private_data;
1026 struct ion_vma_list *vma_list, *tmp;
1027
1028 pr_debug("%s\n", __func__);
1029 mutex_lock(&buffer->lock);
1030 list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1031 if (vma_list->vma != vma)
1032 continue;
1033 list_del(&vma_list->list);
1034 kfree(vma_list);
1035 pr_debug("%s: deleting %p\n", __func__, vma);
1036 break;
1037 }
1038 mutex_unlock(&buffer->lock);
1039 }
1040
1041 static const struct vm_operations_struct ion_vma_ops = {
1042 .open = ion_vm_open,
1043 .close = ion_vm_close,
1044 .fault = ion_vm_fault,
1045 };
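/*
 * For buffers with ION_FLAG_CACHED but without ION_FLAG_CACHED_NEEDS_SYNC,
 * user mappings are populated lazily by ion_vm_fault(), which also marks
 * the faulted page dirty.  ion_buffer_sync_for_device() then syncs only the
 * dirty pages and calls zap_page_range() on every tracked vma, so the next
 * CPU access faults again and can be tracked anew.
 */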
1046
1047 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1048 {
1049 struct ion_buffer *buffer = dmabuf->priv;
1050 int ret = 0;
1051
1052 if (!buffer->heap->ops->map_user) {
1053 pr_err("%s: this heap does not define a method for mapping to userspace\n",
1054 __func__);
1055 return -EINVAL;
1056 }
1057
1058 if (ion_buffer_fault_user_mappings(buffer)) {
1059 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1060 VM_DONTDUMP;
1061 vma->vm_private_data = buffer;
1062 vma->vm_ops = &ion_vma_ops;
1063 ion_vm_open(vma);
1064 return 0;
1065 }
1066
1067 if (!(buffer->flags & ION_FLAG_CACHED))
1068 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1069
1070 mutex_lock(&buffer->lock);
1071 /* now map it to userspace */
1072 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1073 mutex_unlock(&buffer->lock);
1074
1075 if (ret)
1076 pr_err("%s: failure mapping buffer to userspace\n",
1077 __func__);
1078
1079 return ret;
1080 }
1081
1082 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1083 {
1084 struct ion_buffer *buffer = dmabuf->priv;
1085
1086 ion_buffer_put(buffer);
1087 }
1088
1089 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1090 {
1091 struct ion_buffer *buffer = dmabuf->priv;
1092
1093 return buffer->vaddr + offset * PAGE_SIZE;
1094 }
1095
1096 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1097 void *ptr)
1098 {
1099 }
1100
1101 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1102 size_t len,
1103 enum dma_data_direction direction)
1104 {
1105 struct ion_buffer *buffer = dmabuf->priv;
1106 void *vaddr;
1107
1108 if (!buffer->heap->ops->map_kernel) {
1109 pr_err("%s: map kernel is not implemented by this heap.\n",
1110 __func__);
1111 return -ENODEV;
1112 }
1113
1114 mutex_lock(&buffer->lock);
1115 vaddr = ion_buffer_kmap_get(buffer);
1116 mutex_unlock(&buffer->lock);
1117 return PTR_ERR_OR_ZERO(vaddr);
1118 }
1119
1120 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1121 size_t len,
1122 enum dma_data_direction direction)
1123 {
1124 struct ion_buffer *buffer = dmabuf->priv;
1125
1126 mutex_lock(&buffer->lock);
1127 ion_buffer_kmap_put(buffer);
1128 mutex_unlock(&buffer->lock);
1129 }
1130
1131 static struct dma_buf_ops dma_buf_ops = {
1132 .map_dma_buf = ion_map_dma_buf,
1133 .unmap_dma_buf = ion_unmap_dma_buf,
1134 .mmap = ion_mmap,
1135 .release = ion_dma_buf_release,
1136 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1137 .end_cpu_access = ion_dma_buf_end_cpu_access,
1138 .kmap_atomic = ion_dma_buf_kmap,
1139 .kunmap_atomic = ion_dma_buf_kunmap,
1140 .kmap = ion_dma_buf_kmap,
1141 .kunmap = ion_dma_buf_kunmap,
1142 };
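/*
 * ion_dma_buf_kmap() simply returns buffer->vaddr plus a page offset, so it
 * only yields a valid pointer once buffer->vaddr has been populated, e.g.
 * by ion_dma_buf_begin_cpu_access() calling ion_buffer_kmap_get().  The
 * begin/end_cpu_access handlers ignore start/len and map the whole buffer.
 */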
1143
1144 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1145 struct ion_handle *handle)
1146 {
1147 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1148 struct ion_buffer *buffer;
1149 struct dma_buf *dmabuf;
1150 bool valid_handle;
1151
1152 mutex_lock(&client->lock);
1153 valid_handle = ion_handle_validate(client, handle);
1154 if (!valid_handle) {
1155 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1156 mutex_unlock(&client->lock);
1157 return ERR_PTR(-EINVAL);
1158 }
1159 buffer = handle->buffer;
1160 ion_buffer_get(buffer);
1161 mutex_unlock(&client->lock);
1162
1163 exp_info.ops = &dma_buf_ops;
1164 exp_info.size = buffer->size;
1165 exp_info.flags = O_RDWR;
1166 exp_info.priv = buffer;
1167
1168 dmabuf = dma_buf_export(&exp_info);
1169 if (IS_ERR(dmabuf)) {
1170 ion_buffer_put(buffer);
1171 return dmabuf;
1172 }
1173
1174 return dmabuf;
1175 }
1176 EXPORT_SYMBOL(ion_share_dma_buf);
1177
1178 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1179 {
1180 struct dma_buf *dmabuf;
1181 int fd;
1182
1183 dmabuf = ion_share_dma_buf(client, handle);
1184 if (IS_ERR(dmabuf))
1185 return PTR_ERR(dmabuf);
1186
1187 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1188 if (fd < 0)
1189 dma_buf_put(dmabuf);
1190
1191 return fd;
1192 }
1193 EXPORT_SYMBOL(ion_share_dma_buf_fd);
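/*
 * The fd returned by ion_share_dma_buf_fd() is an ordinary dma-buf fd: it
 * can be handed to userspace or other drivers and, because the exporter is
 * this file's dma_buf_ops, re-imported through ion_import_dma_buf_fd()
 * below, which resolves it back to the underlying ion_buffer.
 */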
1194
1195 struct ion_handle *ion_import_dma_buf(struct ion_client *client,
1196 struct dma_buf *dmabuf)
1197 {
1198 struct ion_buffer *buffer;
1199 struct ion_handle *handle;
1200 int ret;
1201
1202 /* if this memory came from ion */
1203
1204 if (dmabuf->ops != &dma_buf_ops) {
1205 pr_err("%s: can not import dmabuf from another exporter\n",
1206 __func__);
1207 return ERR_PTR(-EINVAL);
1208 }
1209 buffer = dmabuf->priv;
1210
1211 mutex_lock(&client->lock);
1212 /* if a handle exists for this buffer just take a reference to it */
1213 handle = ion_handle_lookup(client, buffer);
1214 if (!IS_ERR(handle)) {
1215 ion_handle_get(handle);
1216 mutex_unlock(&client->lock);
1217 goto end;
1218 }
1219
1220 handle = ion_handle_create(client, buffer);
1221 if (IS_ERR(handle)) {
1222 mutex_unlock(&client->lock);
1223 goto end;
1224 }
1225
1226 ret = ion_handle_add(client, handle);
1227 mutex_unlock(&client->lock);
1228 if (ret) {
1229 ion_handle_put(handle);
1230 handle = ERR_PTR(ret);
1231 }
1232
1233 end:
1234 return handle;
1235 }
1236 EXPORT_SYMBOL(ion_import_dma_buf);
1237
1238 struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
1239 {
1240 struct dma_buf *dmabuf;
1241 struct ion_handle *handle;
1242
1243 dmabuf = dma_buf_get(fd);
1244 if (IS_ERR(dmabuf))
1245 return ERR_CAST(dmabuf);
1246
1247 handle = ion_import_dma_buf(client, dmabuf);
1248 dma_buf_put(dmabuf);
1249 return handle;
1250 }
1251 EXPORT_SYMBOL(ion_import_dma_buf_fd);
1252
1253 static int ion_sync_for_device(struct ion_client *client, int fd)
1254 {
1255 struct dma_buf *dmabuf;
1256 struct ion_buffer *buffer;
1257
1258 dmabuf = dma_buf_get(fd);
1259 if (IS_ERR(dmabuf))
1260 return PTR_ERR(dmabuf);
1261
1262 /* if this memory came from ion */
1263 if (dmabuf->ops != &dma_buf_ops) {
1264 pr_err("%s: can not sync dmabuf from another exporter\n",
1265 __func__);
1266 dma_buf_put(dmabuf);
1267 return -EINVAL;
1268 }
1269 buffer = dmabuf->priv;
1270
1271 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1272 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1273 dma_buf_put(dmabuf);
1274 return 0;
1275 }
1276
1277 /* fix up the cases where the ioctl direction bits are incorrect */
1278 static unsigned int ion_ioctl_dir(unsigned int cmd)
1279 {
1280 switch (cmd) {
1281 case ION_IOC_SYNC:
1282 case ION_IOC_FREE:
1283 case ION_IOC_CUSTOM:
1284 return _IOC_WRITE;
1285 default:
1286 return _IOC_DIR(cmd);
1287 }
1288 }
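/*
 * The commands special-cased above are treated as pure input commands:
 * ion_ioctl() below only copies their argument in and never copies data
 * back out, regardless of how the command numbers were encoded.
 */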
1289
1290 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1291 {
1292 struct ion_client *client = filp->private_data;
1293 struct ion_device *dev = client->dev;
1294 struct ion_handle *cleanup_handle = NULL;
1295 int ret = 0;
1296 unsigned int dir;
1297
1298 union {
1299 struct ion_fd_data fd;
1300 struct ion_allocation_data allocation;
1301 struct ion_handle_data handle;
1302 struct ion_custom_data custom;
1303 } data;
1304
1305 dir = ion_ioctl_dir(cmd);
1306
1307 if (_IOC_SIZE(cmd) > sizeof(data))
1308 return -EINVAL;
1309
1310 if (dir & _IOC_WRITE)
1311 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1312 return -EFAULT;
1313
1314 switch (cmd) {
1315 case ION_IOC_ALLOC:
1316 {
1317 struct ion_handle *handle;
1318
1319 handle = ion_alloc(client, data.allocation.len,
1320 data.allocation.align,
1321 data.allocation.heap_id_mask,
1322 data.allocation.flags);
1323 if (IS_ERR(handle))
1324 return PTR_ERR(handle);
1325
1326 data.allocation.handle = handle->id;
1327
1328 cleanup_handle = handle;
1329 break;
1330 }
1331 case ION_IOC_FREE:
1332 {
1333 struct ion_handle *handle;
1334
1335 handle = ion_handle_get_by_id(client, data.handle.handle);
1336 if (IS_ERR(handle))
1337 return PTR_ERR(handle);
1338 ion_free(client, handle);
1339 ion_handle_put(handle);
1340 break;
1341 }
1342 case ION_IOC_SHARE:
1343 case ION_IOC_MAP:
1344 {
1345 struct ion_handle *handle;
1346
1347 handle = ion_handle_get_by_id(client, data.handle.handle);
1348 if (IS_ERR(handle))
1349 return PTR_ERR(handle);
1350 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1351 ion_handle_put(handle);
1352 if (data.fd.fd < 0)
1353 ret = data.fd.fd;
1354 break;
1355 }
1356 case ION_IOC_IMPORT:
1357 {
1358 struct ion_handle *handle;
1359
1360 handle = ion_import_dma_buf_fd(client, data.fd.fd);
1361 if (IS_ERR(handle))
1362 ret = PTR_ERR(handle);
1363 else
1364 data.handle.handle = handle->id;
1365 break;
1366 }
1367 case ION_IOC_SYNC:
1368 {
1369 ret = ion_sync_for_device(client, data.fd.fd);
1370 break;
1371 }
1372 case ION_IOC_CUSTOM:
1373 {
1374 if (!dev->custom_ioctl)
1375 return -ENOTTY;
1376 ret = dev->custom_ioctl(client, data.custom.cmd,
1377 data.custom.arg);
1378 break;
1379 }
1380 default:
1381 return -ENOTTY;
1382 }
1383
1384 if (dir & _IOC_READ) {
1385 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1386 if (cleanup_handle)
1387 ion_free(client, cleanup_handle);
1388 return -EFAULT;
1389 }
1390 }
1391 return ret;
1392 }
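/*
 * Note on cleanup_handle above: it is only set for ION_IOC_ALLOC, so a
 * failed copy_to_user() frees the freshly allocated handle instead of
 * leaking it back to the client.
 */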
1393
1394 static int ion_release(struct inode *inode, struct file *file)
1395 {
1396 struct ion_client *client = file->private_data;
1397
1398 pr_debug("%s: %d\n", __func__, __LINE__);
1399 ion_client_destroy(client);
1400 return 0;
1401 }
1402
1403 static int ion_open(struct inode *inode, struct file *file)
1404 {
1405 struct miscdevice *miscdev = file->private_data;
1406 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1407 struct ion_client *client;
1408 char debug_name[64];
1409
1410 pr_debug("%s: %d\n", __func__, __LINE__);
1411 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1412 client = ion_client_create(dev, debug_name);
1413 if (IS_ERR(client))
1414 return PTR_ERR(client);
1415 file->private_data = client;
1416
1417 return 0;
1418 }
1419
1420 static const struct file_operations ion_fops = {
1421 .owner = THIS_MODULE,
1422 .open = ion_open,
1423 .release = ion_release,
1424 .unlocked_ioctl = ion_ioctl,
1425 .compat_ioctl = compat_ion_ioctl,
1426 };
1427
1428 static size_t ion_debug_heap_total(struct ion_client *client,
1429 unsigned int id)
1430 {
1431 size_t size = 0;
1432 struct rb_node *n;
1433
1434 mutex_lock(&client->lock);
1435 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1436 struct ion_handle *handle = rb_entry(n,
1437 struct ion_handle,
1438 node);
1439 if (handle->buffer->heap->id == id)
1440 size += handle->buffer->size;
1441 }
1442 mutex_unlock(&client->lock);
1443 return size;
1444 }
1445
1446 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1447 {
1448 struct ion_heap *heap = s->private;
1449 struct ion_device *dev = heap->dev;
1450 struct rb_node *n;
1451 size_t total_size = 0;
1452 size_t total_orphaned_size = 0;
1453
1454 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1455 seq_puts(s, "----------------------------------------------------\n");
1456
1457 mutex_lock(&debugfs_mutex);
1458 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1459 struct ion_client *client = rb_entry(n, struct ion_client,
1460 node);
1461 size_t size = ion_debug_heap_total(client, heap->id);
1462
1463 if (!size)
1464 continue;
1465 if (client->task) {
1466 char task_comm[TASK_COMM_LEN];
1467
1468 get_task_comm(task_comm, client->task);
1469 seq_printf(s, "%16s %16u %16zu\n", task_comm,
1470 client->pid, size);
1471 } else {
1472 seq_printf(s, "%16s %16u %16zu\n", client->name,
1473 client->pid, size);
1474 }
1475 }
1476 mutex_unlock(&debugfs_mutex);
1477
1478 seq_puts(s, "----------------------------------------------------\n");
1479 seq_puts(s, "orphaned allocations (info is from last known client):\n");
1480 mutex_lock(&dev->buffer_lock);
1481 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1482 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1483 node);
1484 if (buffer->heap->id != heap->id)
1485 continue;
1486 total_size += buffer->size;
1487 if (!buffer->handle_count) {
1488 seq_printf(s, "%16s %16u %16zu %d %d\n",
1489 buffer->task_comm, buffer->pid,
1490 buffer->size, buffer->kmap_cnt,
1491 atomic_read(&buffer->ref.refcount));
1492 total_orphaned_size += buffer->size;
1493 }
1494 }
1495 mutex_unlock(&dev->buffer_lock);
1496 seq_puts(s, "----------------------------------------------------\n");
1497 seq_printf(s, "%16s %16zu\n", "total orphaned",
1498 total_orphaned_size);
1499 seq_printf(s, "%16s %16zu\n", "total ", total_size);
1500 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1501 seq_printf(s, "%16s %16zu\n", "deferred free",
1502 heap->free_list_size);
1503 seq_puts(s, "----------------------------------------------------\n");
1504
1505 if (heap->debug_show)
1506 heap->debug_show(heap, s, unused);
1507
1508 return 0;
1509 }
1510
1511 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1512 {
1513 return single_open(file, ion_debug_heap_show, inode->i_private);
1514 }
1515
1516 static const struct file_operations debug_heap_fops = {
1517 .open = ion_debug_heap_open,
1518 .read = seq_read,
1519 .llseek = seq_lseek,
1520 .release = single_release,
1521 };
1522
1523 static int debug_shrink_set(void *data, u64 val)
1524 {
1525 struct ion_heap *heap = data;
1526 struct shrink_control sc;
1527 int objs;
1528
1529 sc.gfp_mask = -1;
1530 sc.nr_to_scan = val;
1531
1532 if (!val) {
1533 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1534 sc.nr_to_scan = objs;
1535 }
1536
1537 heap->shrinker.scan_objects(&heap->shrinker, &sc);
1538 return 0;
1539 }
1540
1541 static int debug_shrink_get(void *data, u64 *val)
1542 {
1543 struct ion_heap *heap = data;
1544 struct shrink_control sc;
1545 int objs;
1546
1547 sc.gfp_mask = -1;
1548 sc.nr_to_scan = 0;
1549
1550 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1551 *val = objs;
1552 return 0;
1553 }
1554
1555 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1556 debug_shrink_set, "%llu\n");
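/*
 * These helpers back the per-heap "<heap name>_shrink" debugfs file created
 * in ion_device_add_heap(): reading it reports the shrinker's current
 * object count, writing N scans up to N objects, and writing 0 drains
 * everything the shrinker currently counts.
 */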
1557
1558 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1559 {
1560 struct dentry *debug_file;
1561
1562 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1563 !heap->ops->unmap_dma)
1564 pr_err("%s: can not add heap with invalid ops struct.\n",
1565 __func__);
1566
1567 spin_lock_init(&heap->free_lock);
1568 heap->free_list_size = 0;
1569
1570 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1571 ion_heap_init_deferred_free(heap);
1572
1573 if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1574 ion_heap_init_shrinker(heap);
1575
1576 heap->dev = dev;
1577 down_write(&dev->lock);
1578 /*
1579 * use negative heap->id to reverse the priority -- when traversing
1580 * the list later, higher id numbers are attempted first
1581 */
1582 plist_node_init(&heap->node, -heap->id);
1583 plist_add(&heap->node, &dev->heaps);
1584 debug_file = debugfs_create_file(heap->name, 0664,
1585 dev->heaps_debug_root, heap,
1586 &debug_heap_fops);
1587
1588 if (!debug_file) {
1589 char buf[256], *path;
1590
1591 path = dentry_path(dev->heaps_debug_root, buf, 256);
1592 pr_err("Failed to create heap debugfs at %s/%s\n",
1593 path, heap->name);
1594 }
1595
1596 if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
1597 char debug_name[64];
1598
1599 snprintf(debug_name, 64, "%s_shrink", heap->name);
1600 debug_file = debugfs_create_file(
1601 debug_name, 0644, dev->heaps_debug_root, heap,
1602 &debug_shrink_fops);
1603 if (!debug_file) {
1604 char buf[256], *path;
1605
1606 path = dentry_path(dev->heaps_debug_root, buf, 256);
1607 pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1608 path, debug_name);
1609 }
1610 }
1611
1612 up_write(&dev->lock);
1613 }
1614 EXPORT_SYMBOL(ion_device_add_heap);
1615
1616 struct ion_device *ion_device_create(long (*custom_ioctl)
1617 (struct ion_client *client,
1618 unsigned int cmd,
1619 unsigned long arg))
1620 {
1621 struct ion_device *idev;
1622 int ret;
1623
1624 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1625 if (!idev)
1626 return ERR_PTR(-ENOMEM);
1627
1628 idev->dev.minor = MISC_DYNAMIC_MINOR;
1629 idev->dev.name = "ion";
1630 idev->dev.fops = &ion_fops;
1631 idev->dev.parent = NULL;
1632 ret = misc_register(&idev->dev);
1633 if (ret) {
1634 pr_err("ion: failed to register misc device.\n");
1635 kfree(idev);
1636 return ERR_PTR(ret);
1637 }
1638
1639 idev->debug_root = debugfs_create_dir("ion", NULL);
1640 if (!idev->debug_root) {
1641 pr_err("ion: failed to create debugfs root directory.\n");
1642 goto debugfs_done;
1643 }
1644 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1645 if (!idev->heaps_debug_root) {
1646 pr_err("ion: failed to create debugfs heaps directory.\n");
1647 goto debugfs_done;
1648 }
1649 idev->clients_debug_root = debugfs_create_dir("clients",
1650 idev->debug_root);
1651 if (!idev->clients_debug_root)
1652 pr_err("ion: failed to create debugfs clients directory.\n");
1653
1654 debugfs_done:
1655
1656 idev->custom_ioctl = custom_ioctl;
1657 idev->buffers = RB_ROOT;
1658 mutex_init(&idev->buffer_lock);
1659 init_rwsem(&idev->lock);
1660 plist_head_init(&idev->heaps);
1661 idev->clients = RB_ROOT;
1662 ion_root_client = &idev->clients;
1663 mutex_init(&debugfs_mutex);
1664 return idev;
1665 }
1666 EXPORT_SYMBOL(ion_device_create);
1667
1668 void ion_device_destroy(struct ion_device *dev)
1669 {
1670 misc_deregister(&dev->dev);
1671 debugfs_remove_recursive(dev->debug_root);
1672 /* XXX need to free the heaps and clients ? */
1673 kfree(dev);
1674 }
1675 EXPORT_SYMBOL(ion_device_destroy);
1676
1677 void __init ion_reserve(struct ion_platform_data *data)
1678 {
1679 int i;
1680
1681 for (i = 0; i < data->nr; i++) {
1682 if (data->heaps[i].size == 0)
1683 continue;
1684
1685 if (data->heaps[i].base == 0) {
1686 phys_addr_t paddr;
1687
1688 paddr = memblock_alloc_base(data->heaps[i].size,
1689 data->heaps[i].align,
1690 MEMBLOCK_ALLOC_ANYWHERE);
1691 if (!paddr) {
1692 pr_err("%s: error allocating memblock for heap %d\n",
1693 __func__, i);
1694 continue;
1695 }
1696 data->heaps[i].base = paddr;
1697 } else {
1698 int ret = memblock_reserve(data->heaps[i].base,
1699 data->heaps[i].size);
1700 if (ret)
1701 pr_err("memblock reserve of %zx@%lx failed\n",
1702 data->heaps[i].size,
1703 data->heaps[i].base);
1704 }
1705 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1706 data->heaps[i].name,
1707 data->heaps[i].base,
1708 data->heaps[i].size);
1709 }
1710 }