/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */
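
/*
 * Usage sketch (illustrative, not an interface defined here): a zpool
 * user such as zswap selects this backend by name (e.g. via the
 * zswap.zpool=z3fold kernel parameter) and then allocates, maps and
 * frees objects through the zpool_* wrappers registered at the bottom
 * of this file.
 */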

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX
};

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @pool:		pointer to the pool which this page belongs to
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
};

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header, so
 * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * likewise be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
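
/*
 * Worked example (illustrative): with PAGE_SIZE == 4096 and
 * NCHUNKS_ORDER == 6, CHUNK_SHIFT is 6, so CHUNK_SIZE is 64 bytes and
 * TOTAL_CHUNKS is 64. NCHUNKS is TOTAL_CHUNKS minus the chunks taken by
 * the aligned header, e.g. 63 when the header fits into a single chunk.
 */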

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain at
 *		most two buddies; the list each z3fold page is added to
 *		depends on the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
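
/*
 * For instance (illustrative, assuming 64-byte chunks): a 200-byte
 * allocation rounds up to size_to_chunks(200) == 4 chunks.
 */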

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool)
{
	struct z3fold_header *zhdr = page_address(page);

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page)
{
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	handle = (unsigned long)zhdr;
	if (bud != HEADLESS) {
		handle |= (bud + zhdr->first_num) & BUDDY_MASK;
		if (bud == LAST)
			handle |= (zhdr->last_chunks << BUDDY_SHIFT);
	}
	return handle;
}

/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
	return (struct z3fold_header *)(handle & PAGE_MASK);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	return (handle & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
	return (handle - zhdr->first_num) & BUDDY_MASK;
}
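
/*
 * Handle layout sketch (illustrative): the page-aligned address of the
 * z3fold_header fills the upper bits of the handle, the two lowest bits
 * carry (bud + first_num) & BUDDY_MASK, and for a LAST buddy the object
 * size in chunks is stored from bit BUDDY_SHIFT upwards so that
 * handle_to_chunks() can recover it without touching the header.
 */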

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}
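
/*
 * Design note (illustrative): stale pages are freed from a workqueue
 * because cancel_work_sync() on a page's pending compaction work may
 * sleep, which is not acceptable in every context that can drop the
 * last reference to a page.
 */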

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
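
/*
 * Example (illustrative): with first_chunks == 10, last_chunks == 20 and
 * no middle buddy, num_free_chunks() reports NCHUNKS - 30. If a middle
 * buddy is present, only the larger of the two gaps around it counts,
 * since a new buddy must occupy a contiguous run of chunks.
 */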

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
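
/*
 * Compaction sketch (illustrative): a lone middle buddy is promoted to
 * FIRST, and first_num is bumped so that a previously encoded MIDDLE
 * handle still decodes to the correct slot; otherwise the middle buddy
 * is only shifted towards its neighbour when doing so closes a gap of
 * at least BIG_CHUNK_GAP chunks, since memmove() is not free.
 */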

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	return zhdr;
}
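
/*
 * Design note (illustrative): the unbuddied lists are per-CPU so the
 * common lookup path above touches only the local CPU's lists and takes
 * the pool lock just for the final re-check; zhdr->cpu records which
 * CPU's lists the page currently sits on (-1 when it is on none).
 */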

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	destroy_workqueue(pool->release_wq);
	destroy_workqueue(pool->compact_wq);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the request can never fit in a page,
 * or -ENOMEM if the pool was unable to allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, pool);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
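
/*
 * Buddy selection example (illustrative): if the FIRST slot is free but
 * an existing middle buddy starts too close to the header for the new
 * object to fit in front of it (chunks >= start_middle), the object is
 * placed as the LAST buddy instead. Requests too large to leave room
 * for the header plus at least one spare chunk get a HEADLESS page of
 * their own.
 */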

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			free_z3fold_page(page);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	z3fold_page_lock(zhdr);
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		z3fold_page_unlock(zhdr);
		return;
	}

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle cannot be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private))
				continue;

			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			if (!z3fold_page_trylock(zhdr)) {
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked, so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
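
/*
 * Eviction handler sketch (illustrative; the actual handler lives in the
 * zpool user, e.g. zswap's writeback path): for each handle passed in,
 * the handler typically maps the object, writes it back to its backing
 * store, unmaps it and frees the handle (ending up in z3fold_free())
 * before returning 0; returning non-zero puts the page back on the LRU.
 */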

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	z3fold_page_unlock(zhdr);
out:
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");