/*
 * drivers/staging/android/ion/ion_page_pool.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include "ion_priv.h"
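
/*
 * Grab a fresh block of 2^order pages for the pool.  For uncached pools the
 * new pages are synced for device access (DMA_BIDIRECTIONAL) before being
 * handed out; cached pools skip this step.
 */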
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
        struct page *page = alloc_pages(pool->gfp_mask, pool->order);

        if (!page)
                return NULL;
        if (!pool->cached)
                ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
                                          DMA_BIDIRECTIONAL);
        return page;
}
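
/* Hand a block of pages straight back to the page allocator. */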
static void ion_page_pool_free_pages(struct ion_page_pool *pool,
                                     struct page *page)
{
        __free_pages(page, pool->order);
}
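
/*
 * Stash a freed page on the pool, keeping highmem and lowmem pages on
 * separate lists so that reclaim can treat them differently.
 */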
static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
{
        mutex_lock(&pool->mutex);
        if (PageHighMem(page)) {
                list_add_tail(&page->lru, &pool->high_items);
                pool->high_count++;
        } else {
                list_add_tail(&page->lru, &pool->low_items);
                pool->low_count++;
        }
        mutex_unlock(&pool->mutex);
        return 0;
}
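
/*
 * Pop the first page from the requested list.  The caller must hold
 * pool->mutex and must have checked that the list is non-empty.
 */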
static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
{
        struct page *page;

        if (high) {
                BUG_ON(!pool->high_count);
                page = list_first_entry(&pool->high_items, struct page, lru);
                pool->high_count--;
        } else {
                BUG_ON(!pool->low_count);
                page = list_first_entry(&pool->low_items, struct page, lru);
                pool->low_count--;
        }

        list_del(&page->lru);
        return page;
}
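
/*
 * Allocate from the pool, preferring pooled highmem pages, then pooled
 * lowmem pages, and falling back to a fresh allocation only when the
 * pool is empty.
 */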
struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
{
        struct page *page = NULL;

        BUG_ON(!pool);

        mutex_lock(&pool->mutex);
        if (pool->high_count)
                page = ion_page_pool_remove(pool, true);
        else if (pool->low_count)
                page = ion_page_pool_remove(pool, false);
        mutex_unlock(&pool->mutex);

        if (!page)
                page = ion_page_pool_alloc_pages(pool);

        return page;
}
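
/* Return a page to the pool; pages that cannot be pooled are freed outright. */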
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
{
        int ret;

        BUG_ON(pool->order != compound_order(page));

        ret = ion_page_pool_add(pool, page);
        if (ret)
                ion_page_pool_free_pages(pool, page);
}
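
/*
 * Report the pool size in pages (not blocks).  Highmem pages are counted
 * only when @high is true.
 */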
static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
{
        int count = pool->low_count;

        if (high)
                count += pool->high_count;

        return count << pool->order;
}
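
/*
 * Drain pages from the pool.  When @nr_to_scan is zero this only reports
 * how many pages could be freed; otherwise pooled pages are released back
 * to the system until @nr_to_scan pages have been freed or the pool is
 * empty.  Highmem pages are reclaimed only for kswapd or __GFP_HIGHMEM
 * requests.  Returns the number of pages freed.
 */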
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
                         int nr_to_scan)
{
        int freed = 0;
        bool high;

        if (current_is_kswapd())
                high = true;
        else
                high = !!(gfp_mask & __GFP_HIGHMEM);

        if (nr_to_scan == 0)
                return ion_page_pool_total(pool, high);

        while (freed < nr_to_scan) {
                struct page *page;

                mutex_lock(&pool->mutex);
                if (pool->low_count) {
                        page = ion_page_pool_remove(pool, false);
                } else if (high && pool->high_count) {
                        page = ion_page_pool_remove(pool, true);
                } else {
                        mutex_unlock(&pool->mutex);
                        break;
                }
                mutex_unlock(&pool->mutex);
                ion_page_pool_free_pages(pool, page);
                freed += (1 << pool->order);
        }

        return freed;
}
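
/*
 * Create a pool of order @order allocations.  __GFP_COMP is added to the
 * gfp mask so the pooled blocks are compound pages, which is what lets
 * ion_page_pool_free() verify the order of pages handed back.
 */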
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
                                           bool cached)
{
        struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);

        if (!pool)
                return NULL;
        pool->high_count = 0;
        pool->low_count = 0;
        INIT_LIST_HEAD(&pool->low_items);
        INIT_LIST_HEAD(&pool->high_items);
        pool->gfp_mask = gfp_mask | __GFP_COMP;
        pool->order = order;
        mutex_init(&pool->mutex);
        plist_node_init(&pool->list, order);
        pool->cached = cached;

        return pool;
}
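
/* Free the pool bookkeeping; pages still sitting on its lists are not released here. */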
void ion_page_pool_destroy(struct ion_page_pool *pool)
{
        kfree(pool);
}
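
/* Nothing to set up globally; pools are created by the heaps that use them. */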
static int __init ion_page_pool_init(void)
{
        return 0;
}
device_initcall(ion_page_pool_init);