/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * This allocator is designed for use with zram. Thus, the allocator is
 * supposed to work well under low memory conditions. In particular, it
 * never attempts higher order page allocation which is very likely to
 * fail under memory pressure. On the other hand, if we just use single
 * (0-order) pages, it would suffer from very high fragmentation --
 * any object of size PAGE_SIZE/2 or larger would occupy an entire page.
 * This was one of the major issues with its predecessor (xvmalloc).
 *
 * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
 * and links them together using various 'struct page' fields. These linked
 * pages act as a single higher-order page i.e. an object can span 0-order
 * page boundaries. The code refers to these linked pages as a single entity
 * called zspage.
 *
 * For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE
 * since this satisfies the requirements of all its current users (in the
 * worst case, page is incompressible and is thus stored "as-is" i.e. in
 * uncompressed form). For allocation requests larger than this size, failure
 * is returned (see zs_malloc).
 *
 * Additionally, zs_malloc() does not return a dereferenceable pointer.
 * Instead, it returns an opaque handle (unsigned long) which encodes the
 * actual location of the allocated object. The reason for this indirection
 * is that zsmalloc does not keep zspages permanently mapped since that
 * would cause issues on 32-bit systems where the VA region for kernel
 * space mappings is very small. So, before using the allocated memory, the
 * object has to be mapped using zs_map_object() to get a usable pointer
 * and subsequently unmapped using zs_unmap_object().
 *
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->first_page: points to the first component (0-order) page
 *	page->index (union with page->freelist): offset of the first object
 *		starting in this page. For the first page, this is
 *		always 0, so we use this field (aka freelist) to point
 *		to the first free object in zspage.
 *	page->lru: links together all component pages (except the first page)
 *		of a zspage
 *
 *	For _first_ page only:
 *
 *	page->private (union with page->first_page): refers to the
 *		component page after the first page
 *	page->freelist: points to the first free object in zspage.
 *		Free objects are linked together using in-place
 *		metadata.
 *	page->objects: maximum number of objects we can store in this
 *		zspage (class->pages_per_zspage * PAGE_SIZE / class->size)
 *	page->lru: links together first pages of various zspages.
 *		Basically forming list of zspages in a fullness group.
 *	page->mapping: class index and fullness group of the zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_private2: identifies the last component page
 *
 */

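/*
 * Typical caller flow, as an illustrative sketch only (pool, src and
 * len are placeholder names; error handling elided):
 *
 *	unsigned long handle = zs_malloc(pool, len);
 *	void *dst = zs_map_object(pool, handle, ZS_MM_WO);
 *	memcpy(dst, src, len);
 *	zs_unmap_object(pool, handle);
 *	...
 *	zs_free(pool, handle);
 */
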
#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>

/*
 * This must be a power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page which avoids complex case of mapping 2 pages simply
 * to restore link_free pointer values.
 */
#define ZS_ALIGN		8

/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)

#define ZS_HANDLE_SIZE (sizeof(unsigned long))

/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> is relative to system
 * page <PFN> it is stored in, so for each sub-page belonging
 * to a zspage, obj_idx starts with 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS 36
#else /* !CONFIG_HIGHMEM64G */
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif
#define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
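/*
 * Example layout (an assumption for one common configuration): on a
 * 64-bit build with 4K pages and the BITS_PER_LONG default above,
 * OBJ_INDEX_BITS == PAGE_SHIFT == 12, so an encoded location packs as
 * <PFN:52 bits><obj_idx:12 bits>; see obj_location_to_handle().
 */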

#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
/* each chunk includes extra space to keep handle */
#define ZS_MAX_ALLOC_SIZE	(PAGE_SIZE + ZS_HANDLE_SIZE)

/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably it's better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 * (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)
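/*
 * E.g. with 4K pages and 64-bit longs (an illustration, not a
 * requirement): ZS_SIZE_CLASS_DELTA is 4096 >> 8 = 16 bytes and
 * ZS_MIN_ALLOC_SIZE evaluates to 32, so class sizes run
 * 32, 48, 64, ... up to ZS_MAX_ALLOC_SIZE.
 */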

/*
 * We do not maintain any list for completely empty or full pages
 */
enum fullness_group {
	ZS_ALMOST_FULL,
	ZS_ALMOST_EMPTY,
	_ZS_NR_FULLNESS_GROUPS,

	ZS_EMPTY,
	ZS_FULL
};

enum zs_stat_type {
	OBJ_ALLOCATED,
	OBJ_USED,
	NR_ZS_STAT_TYPE,
};

#ifdef CONFIG_ZSMALLOC_STAT

static struct dentry *zs_stat_root;

struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};

#endif

/*
 * number of size_classes
 */
static int zs_size_classes;

/*
 * We assign a page to ZS_ALMOST_EMPTY fullness group when:
 *	n <= N / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign zspage to:
 *	ZS_ALMOST_FULL	when n > N / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;
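/*
 * Worked example with assumed numbers: for a zspage holding at most
 * N = 8 objects and f = 4, the page is ZS_ALMOST_EMPTY while
 * 1 <= n <= 2, ZS_ALMOST_FULL for 3 <= n <= 7, and ZS_FULL at n == 8.
 */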

struct size_class {
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	unsigned int index;

	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;

#ifdef CONFIG_ZSMALLOC_STAT
	struct zs_size_stat stats;
#endif

	spinlock_t lock;

	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
};

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, first_page->freelist gives head of this list.
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	union {
		/*
		 * Position of next free chunk (encodes <PFN, obj_idx>)
		 * It's valid for non-allocated object
		 */
		void *next;
		/*
		 * Handle of allocated object.
		 */
		unsigned long handle;
	};
};

struct zs_pool {
	char *name;

	struct size_class **size_class;
	struct kmem_cache *handle_cachep;

	gfp_t flags;	/* allocation flags used when growing pool */
	atomic_long_t pages_allocated;

#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;
#endif
};

/*
 * A zspage's class index and fullness group
 * are encoded in its (first)page->mapping
 */
#define CLASS_IDX_BITS	28
#define FULLNESS_BITS	4
#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)

struct mapping_area {
#ifdef CONFIG_PGTABLE_MAPPING
	struct vm_struct *vm; /* vm area for mapping object that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};

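/*
 * Handle indirection: a handle is the kernel address of a small slab
 * object which in turn stores the encoded <PFN, obj_idx> location of
 * the allocation (see record_obj()/handle_to_obj() below), decoupling
 * what callers hold from where the object physically lives.
 */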
static int create_handle_cache(struct zs_pool *pool)
{
	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
					0, 0, NULL);
	return pool->handle_cachep ? 0 : 1;
}

static void destroy_handle_cache(struct zs_pool *pool)
{
	kmem_cache_destroy(pool->handle_cachep);
}

static unsigned long alloc_handle(struct zs_pool *pool)
{
	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
		pool->flags & ~__GFP_HIGHMEM);
}

static void free_handle(struct zs_pool *pool, unsigned long handle)
{
	kmem_cache_free(pool->handle_cachep, (void *)handle);
}

static void record_obj(unsigned long handle, unsigned long obj)
{
	*(unsigned long *)handle = obj;
}

/* zpool driver */

#ifdef CONFIG_ZPOOL

static void *zs_zpool_create(char *name, gfp_t gfp, struct zpool_ops *zpool_ops)
{
	return zs_create_pool(name, gfp);
}

static void zs_zpool_destroy(void *pool)
{
	zs_destroy_pool(pool);
}

static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, size);
	return *handle ? 0 : -1;
}
static void zs_zpool_free(void *pool, unsigned long handle)
{
	zs_free(pool, handle);
}

static int zs_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	return -EINVAL;
}

static void *zs_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	enum zs_mapmode zs_mm;

	switch (mm) {
	case ZPOOL_MM_RO:
		zs_mm = ZS_MM_RO;
		break;
	case ZPOOL_MM_WO:
		zs_mm = ZS_MM_WO;
		break;
	case ZPOOL_MM_RW: /* fallthru */
	default:
		zs_mm = ZS_MM_RW;
		break;
	}

	return zs_map_object(pool, handle, zs_mm);
}
static void zs_zpool_unmap(void *pool, unsigned long handle)
{
	zs_unmap_object(pool, handle);
}

static u64 zs_zpool_total_size(void *pool)
{
	return zs_get_total_pages(pool) << PAGE_SHIFT;
}

static struct zpool_driver zs_zpool_driver = {
	.type =		"zsmalloc",
	.owner =	THIS_MODULE,
	.create =	zs_zpool_create,
	.destroy =	zs_zpool_destroy,
	.malloc =	zs_zpool_malloc,
	.free =		zs_zpool_free,
	.shrink =	zs_zpool_shrink,
	.map =		zs_zpool_map,
	.unmap =	zs_zpool_unmap,
	.total_size =	zs_zpool_total_size,
};

MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

static int is_last_page(struct page *page)
{
	return PagePrivate2(page);
}

static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
				enum fullness_group *fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = (unsigned long)page->mapping;
	*fullness = m & FULLNESS_MASK;
	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}

static void set_zspage_mapping(struct page *page, unsigned int class_idx,
				enum fullness_group fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
			(fullness & FULLNESS_MASK);
	page->mapping = (struct address_space *)m;
}

/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages where each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns index of the
 * size class which has chunk size big enough to hold the given size.
 */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return idx;
}
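/*
 * E.g. (assuming 4K pages, so ZS_MIN_ALLOC_SIZE = 32 and
 * ZS_SIZE_CLASS_DELTA = 16): a 100 byte request yields
 * idx = DIV_ROUND_UP(100 - 32, 16) = 5, i.e. the class of
 * 32 + 5 * 16 = 112 byte chunks.
 */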

/*
 * For each size class, zspages are divided into different groups
 * depending on how "full" they are. This was done so that we could
 * easily find empty or nearly empty zspages when we try to shrink
 * the pool (not yet implemented). This function returns fullness
 * status of the given page.
 */
static enum fullness_group get_fullness_group(struct page *page)
{
	int inuse, max_objects;
	enum fullness_group fg;
	BUG_ON(!is_first_page(page));

	inuse = page->inuse;
	max_objects = page->objects;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == max_objects)
		fg = ZS_FULL;
	else if (inuse <= max_objects / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}

/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	if (*head)
		list_add_tail(&page->lru, &(*head)->lru);

	*head = page;
}

/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	BUG_ON(!*head);
	if (list_empty(&(*head)->lru))
		*head = NULL;
	else if (*head == page)
		*head = (struct page *)list_entry((*head)->lru.next,
					struct page, lru);

	list_del_init(&page->lru);
}

/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, say, from ALMOST_FULL
 * to ALMOST_EMPTY when freeing an object. This function checks if such
 * a status change has occurred for the given page and accordingly moves the
 * page from the freelist of the old fullness group to that of the new
 * fullness group.
 */
static enum fullness_group fix_fullness_group(struct zs_pool *pool,
						struct page *page)
{
	int class_idx;
	struct size_class *class;
	enum fullness_group currfg, newfg;

	BUG_ON(!is_first_page(page));

	get_zspage_mapping(page, &class_idx, &currfg);
	newfg = get_fullness_group(page);
	if (newfg == currfg)
		goto out;

	class = pool->size_class[class_idx];
	remove_zspage(page, class, currfg);
	insert_zspage(page, class, newfg);
	set_zspage_mapping(page, class_idx, newfg);

out:
	return newfg;
}

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 *	usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}

/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
	if (is_first_page(page))
		return page;
	else
		return page->first_page;
}

static struct page *get_next_page(struct page *page)
{
	struct page *next;

	if (is_last_page(page))
		next = NULL;
	else if (is_first_page(page))
		next = (struct page *)page_private(page);
	else
		next = list_entry(page->lru.next, struct page, lru);

	return next;
}

/*
 * Encode <page, obj_idx> as a single handle value.
 * On hardware platforms with physical memory starting at 0x0 the pfn
 * could be 0 so we ensure that the handle will never be 0 by adjusting the
 * encoded obj_idx value before encoding.
 */
static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
	unsigned long handle;

	if (!page) {
		BUG_ON(obj_idx);
		return NULL;
	}

	handle = page_to_pfn(page) << OBJ_INDEX_BITS;
	handle |= ((obj_idx + 1) & OBJ_INDEX_MASK);

	return (void *)handle;
}

/*
 * Decode <page, obj_idx> pair from the given object handle. We adjust the
 * decoded obj_idx back to its original value since it was adjusted in
 * obj_location_to_handle().
 */
static void obj_to_location(unsigned long handle, struct page **page,
				unsigned long *obj_idx)
{
	*page = pfn_to_page(handle >> OBJ_INDEX_BITS);
	*obj_idx = (handle & OBJ_INDEX_MASK) - 1;
}

static unsigned long handle_to_obj(unsigned long handle)
{
	return *(unsigned long *)handle;
}

static unsigned long obj_idx_to_offset(struct page *page,
				unsigned long obj_idx, int class_size)
{
	unsigned long off = 0;

	if (!is_first_page(page))
		off = page->index;

	return off + obj_idx * class_size;
}

static void reset_page(struct page *page)
{
	clear_bit(PG_private, &page->flags);
	clear_bit(PG_private_2, &page->flags);
	set_page_private(page, 0);
	page->mapping = NULL;
	page->freelist = NULL;
	page_mapcount_reset(page);
}

static void free_zspage(struct page *first_page)
{
	struct page *nextp, *tmp, *head_extra;

	BUG_ON(!is_first_page(first_page));
	BUG_ON(first_page->inuse);

	head_extra = (struct page *)page_private(first_page);

	reset_page(first_page);
	__free_page(first_page);

	/* zspage with only 1 system page */
	if (!head_extra)
		return;

	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
		list_del(&nextp->lru);
		reset_page(nextp);
		__free_page(nextp);
	}
	reset_page(head_extra);
	__free_page(head_extra);
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct page *first_page, struct size_class *class)
{
	unsigned long off = 0;
	struct page *page = first_page;

	BUG_ON(!is_first_page(first_page));
	while (page) {
		struct page *next_page;
		struct link_free *link;
		unsigned int i = 1;
		void *vaddr;

		/*
		 * page->index stores offset of first object starting
		 * in the page. For the first page, this is always 0,
		 * so we use first_page->index (aka ->freelist) to store
		 * head of corresponding zspage's freelist.
		 */
		if (page != first_page)
			page->index = off;

		vaddr = kmap_atomic(page);
		link = (struct link_free *)vaddr + off / sizeof(*link);

		while ((off += class->size) < PAGE_SIZE) {
			link->next = obj_location_to_handle(page, i++);
			link += class->size / sizeof(*link);
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		link->next = obj_location_to_handle(next_page, 0);
		kunmap_atomic(vaddr);
		page = next_page;
		off %= PAGE_SIZE;
	}
}

/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
	int i, error;
	struct page *first_page = NULL, *uninitialized_var(prev_page);

	/*
	 * Allocate individual pages and link them together as:
	 * 1. first page->private = first sub-page
	 * 2. all sub-pages are linked together using page->lru
	 * 3. each sub-page is linked to the first page using page->first_page
	 *
	 * For each size class, First/Head pages are linked together using
	 * page->lru. Also, we set PG_private to identify the first page
	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
	 * identify the last page.
	 */
	error = -ENOMEM;
	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(flags);
		if (!page)
			goto cleanup;

		INIT_LIST_HEAD(&page->lru);
		if (i == 0) {	/* first page */
			SetPagePrivate(page);
			set_page_private(page, 0);
			first_page = page;
			first_page->inuse = 0;
		}
		if (i == 1)
			set_page_private(first_page, (unsigned long)page);
		if (i >= 1)
			page->first_page = first_page;
		if (i >= 2)
			list_add(&page->lru, &prev_page->lru);
		if (i == class->pages_per_zspage - 1)	/* last page */
			SetPagePrivate2(page);
		prev_page = page;
	}

	init_zspage(first_page, class);

	first_page->freelist = obj_location_to_handle(first_page, 0);
	/* Maximum number of objects we can store in this zspage */
	first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;

	error = 0; /* Success */

cleanup:
	if (unlikely(error) && first_page) {
		free_zspage(first_page);
		first_page = NULL;
	}

	return first_page;
}

static struct page *find_get_zspage(struct size_class *class)
{
	int i;
	struct page *page;

	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page)
			break;
	}

	return page;
}

#ifdef CONFIG_PGTABLE_MAPPING
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}

static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}

static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;

	unmap_kernel_range(addr, PAGE_SIZE * 2);
}

#else /* CONFIG_PGTABLE_MAPPING */

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
	area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	kfree(area->vm_buf);
	area->vm_buf = NULL;
}

static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}

static void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	buf = area->vm_buf + ZS_HANDLE_SIZE;
	size -= ZS_HANDLE_SIZE;
	off += ZS_HANDLE_SIZE;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

#endif /* CONFIG_PGTABLE_MAPPING */
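
/*
 * Of the two strategies above: with CONFIG_PGTABLE_MAPPING a spanning
 * object is made virtually contiguous by mapping its two pages into a
 * reserved per-cpu VM area; otherwise it is memcpy'd in and out of a
 * per-cpu buffer. The first avoids copies at the cost of TLB work.
 */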

static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
				void *pcpu)
{
	int ret, cpu = (long)pcpu;
	struct mapping_area *area;

	switch (action) {
	case CPU_UP_PREPARE:
		area = &per_cpu(zs_map_area, cpu);
		ret = __zs_cpu_up(area);
		if (ret)
			return notifier_from_errno(ret);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		area = &per_cpu(zs_map_area, cpu);
		__zs_cpu_down(area);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};

static int zs_register_cpu_notifier(void)
{
	int cpu, uninitialized_var(ret);

	cpu_notifier_register_begin();

	__register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret))
			break;
	}

	cpu_notifier_register_done();
	return notifier_to_errno(ret);
}

static void zs_unregister_cpu_notifier(void)
{
	int cpu;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	__unregister_cpu_notifier(&zs_cpu_nb);

	cpu_notifier_register_done();
}

static void init_zs_size_classes(void)
{
	int nr;

	nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / ZS_SIZE_CLASS_DELTA + 1;
	if ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) % ZS_SIZE_CLASS_DELTA)
		nr += 1;

	zs_size_classes = nr;
}

static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
{
	return pages_per_zspage * PAGE_SIZE / size;
}

static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
{
	if (prev->pages_per_zspage != pages_per_zspage)
		return false;

	if (get_maxobj_per_zspage(prev->size, prev->pages_per_zspage)
		!= get_maxobj_per_zspage(size, pages_per_zspage))
		return false;

	return true;
}
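/*
 * Illustration (computed for 4K pages; an example, not exhaustive):
 * every class size from 2064 to 2336 bytes wants a 4-page zspage
 * holding 7 objects, so zs_create_pool() merges that whole run into
 * the single 2336-byte size_class.
 */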

#ifdef CONFIG_ZSMALLOC_STAT

static inline void zs_stat_inc(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	class->stats.objs[type] += cnt;
}

static inline void zs_stat_dec(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	class->stats.objs[type] -= cnt;
}

static inline unsigned long zs_stat_get(struct size_class *class,
				enum zs_stat_type type)
{
	return class->stats.objs[type];
}

static int __init zs_stat_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
	if (!zs_stat_root)
		return -ENOMEM;

	return 0;
}

static void __exit zs_stat_exit(void)
{
	debugfs_remove_recursive(zs_stat_root);
}

static int zs_stats_size_show(struct seq_file *s, void *v)
{
	int i;
	struct zs_pool *pool = s->private;
	struct size_class *class;
	int objs_per_zspage;
	unsigned long obj_allocated, obj_used, pages_used;
	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;

	seq_printf(s, " %5s %5s %13s %10s %10s\n", "class", "size",
			"obj_allocated", "obj_used", "pages_used");

	for (i = 0; i < zs_size_classes; i++) {
		class = pool->size_class[i];

		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
		obj_used = zs_stat_get(class, OBJ_USED);
		spin_unlock(&class->lock);

		objs_per_zspage = get_maxobj_per_zspage(class->size,
				class->pages_per_zspage);
		pages_used = obj_allocated / objs_per_zspage *
				class->pages_per_zspage;

		seq_printf(s, " %5u %5u %10lu %10lu %10lu\n", i,
			class->size, obj_allocated, obj_used, pages_used);

		total_objs += obj_allocated;
		total_used_objs += obj_used;
		total_pages += pages_used;
	}

	seq_puts(s, "\n");
	seq_printf(s, " %5s %5s %10lu %10lu %10lu\n", "Total", "",
			total_objs, total_used_objs, total_pages);

	return 0;
}

static int zs_stats_size_open(struct inode *inode, struct file *file)
{
	return single_open(file, zs_stats_size_show, inode->i_private);
}

static const struct file_operations zs_stat_size_ops = {
	.open           = zs_stats_size_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static int zs_pool_stat_create(char *name, struct zs_pool *pool)
{
	struct dentry *entry;

	if (!zs_stat_root)
		return -ENODEV;

	entry = debugfs_create_dir(name, zs_stat_root);
	if (!entry) {
		pr_warn("debugfs dir <%s> creation failed\n", name);
		return -ENOMEM;
	}
	pool->stat_dentry = entry;

	entry = debugfs_create_file("obj_in_classes", S_IFREG | S_IRUGO,
			pool->stat_dentry, pool, &zs_stat_size_ops);
	if (!entry) {
		pr_warn("%s: debugfs file entry <%s> creation failed\n",
				name, "obj_in_classes");
		return -ENOMEM;
	}

	return 0;
}

static void zs_pool_stat_destroy(struct zs_pool *pool)
{
	debugfs_remove_recursive(pool->stat_dentry);
}

#else /* CONFIG_ZSMALLOC_STAT */

static inline void zs_stat_inc(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
}

static inline void zs_stat_dec(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
}

static inline unsigned long zs_stat_get(struct size_class *class,
				enum zs_stat_type type)
{
	return 0;
}

static int __init zs_stat_init(void)
{
	return 0;
}

static void __exit zs_stat_exit(void)
{
}

static inline int zs_pool_stat_create(char *name, struct zs_pool *pool)
{
	return 0;
}

static inline void zs_pool_stat_destroy(struct zs_pool *pool)
{
}

#endif

unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);

/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 * @mm: mapping mode to use
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct page *page;
	unsigned long obj, obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];
	void *ret;

	BUG_ON(!handle);

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another user's mappings.
	 */
	BUG_ON(in_interrupt());

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		ret = area->vm_addr + off;
		goto out;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	ret = __zs_map_object(area, pages, off, class->size);
out:
	return ret + ZS_HANDLE_SIZE;
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct page *page;
	unsigned long obj, obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	BUG_ON(!handle);

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);

/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
	unsigned long handle, obj;
	struct link_free *link;
	struct size_class *class;
	void *vaddr;

	struct page *first_page, *m_page;
	unsigned long m_objidx, m_offset;

	if (unlikely(!size || (size + ZS_HANDLE_SIZE) > ZS_MAX_ALLOC_SIZE))
		return 0;

	handle = alloc_handle(pool);
	if (!handle)
		return 0;

	/* extra space in chunk to keep the handle */
	size += ZS_HANDLE_SIZE;
	class = pool->size_class[get_size_class_index(size)];

	spin_lock(&class->lock);
	first_page = find_get_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, pool->flags);
		if (unlikely(!first_page)) {
			free_handle(pool, handle);
			return 0;
		}

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		atomic_long_add(class->pages_per_zspage,
					&pool->pages_allocated);

		spin_lock(&class->lock);
		zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));
	}

	obj = (unsigned long)first_page->freelist;
	obj_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	vaddr = kmap_atomic(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	first_page->freelist = link->next;

	/* record handle in the header of allocated chunk */
	link->handle = handle;
	kunmap_atomic(vaddr);

	first_page->inuse++;
	zs_stat_inc(class, OBJ_USED, 1);
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(pool, first_page);
	record_obj(handle, obj);
	spin_unlock(&class->lock);

	return handle;
}
EXPORT_SYMBOL_GPL(zs_malloc);

void zs_free(struct zs_pool *pool, unsigned long handle)
{
	struct link_free *link;
	struct page *first_page, *f_page;
	unsigned long obj, f_objidx, f_offset;
	void *vaddr;

	int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!handle))
		return;

	obj = handle_to_obj(handle);
	free_handle(pool, handle);
	obj_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = pool->size_class[class_idx];
	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	spin_lock(&class->lock);

	/* Insert this object in containing zspage's freelist */
	vaddr = kmap_atomic(f_page);
	link = (struct link_free *)(vaddr + f_offset);
	link->next = first_page->freelist;
	kunmap_atomic(vaddr);
	first_page->freelist = (void *)obj;

	first_page->inuse--;
	fullness = fix_fullness_group(pool, first_page);

	zs_stat_dec(class, OBJ_USED, 1);
	if (fullness == ZS_EMPTY)
		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));

	spin_unlock(&class->lock);

	if (fullness == ZS_EMPTY) {
		atomic_long_sub(class->pages_per_zspage,
				&pool->pages_allocated);
		free_zspage(first_page);
	}
}
EXPORT_SYMBOL_GPL(zs_free);

/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @name: pool name
 * @flags: allocation flags used to allocate pool metadata
 *
 * This function must be called before anything when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(char *name, gfp_t flags)
{
	int i;
	struct zs_pool *pool;
	struct size_class *prev_class = NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
			GFP_KERNEL);
	if (!pool->size_class) {
		kfree(pool);
		return NULL;
	}

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto err;

	if (create_handle_cache(pool))
		goto err;

	/*
	 * Iterate in reverse, because the size_class that we want to reuse
	 * for merging should be larger than or equal to the current size.
	 */
	for (i = zs_size_classes - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		struct size_class *class;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = get_pages_per_zspage(size);

		/*
		 * size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get more memory utilization if we use one size_class for
		 * many different sizes whose size_class have same
		 * characteristics. So, we make size_class point to
		 * previous size_class if possible.
		 */
		if (prev_class) {
			if (can_merge(prev_class, size, pages_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;

		prev_class = class;
	}

	pool->flags = flags;

	if (zs_pool_stat_create(name, pool))
		goto err;

	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);

void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	zs_pool_stat_destroy(pool);

	for (i = 0; i < zs_size_classes; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		if (class->index != i)
			continue;

		for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
			if (class->fullness_list[fg]) {
				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
					class->size, fg);
			}
		}
		kfree(class);
	}

	destroy_handle_cache(pool);
	kfree(pool->size_class);
	kfree(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

static int __init zs_init(void)
{
	int ret = zs_register_cpu_notifier();

	if (ret)
		goto notifier_fail;

	init_zs_size_classes();

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif

	ret = zs_stat_init();
	if (ret) {
		pr_err("zs stat initialization failed\n");
		goto stat_fail;
	}
	return 0;

stat_fail:
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
notifier_fail:
	zs_unregister_cpu_notifier();

	return ret;
}

static void __exit zs_exit(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
	zs_unregister_cpu_notifier();

	zs_stat_exit();
}

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");