// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */
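
/*
 * Usage sketch (illustrative, not part of this file): a zpool client such
 * as zswap reaches this allocator only through the generic zpool entry
 * points. The zpool_*() calls below are the real zpool API; the variable
 * names and sizes are hypothetical.
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "mypool",
 *					     GFP_KERNEL, &my_zpool_ops);
 *	unsigned long handle;
 *	void *obj;
 *
 *	zpool_malloc(zp, size, GFP_KERNEL, &handle);
 *	obj = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *	memcpy(obj, src, size);
 *	zpool_unmap_handle(zp, handle);
 *	...
 *	zpool_free(zp, handle);
 *	zpool_destroy_pool(zp);
 */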

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * accordingly be 63 (or 62) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
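
/*
 * For example, with a 4K page and NCHUNKS_ORDER of 6: CHUNK_SHIFT == 6,
 * CHUNK_SIZE == 64 bytes and TOTAL_CHUNKS == 64. Assuming the header fits
 * in a single chunk, ZHDR_CHUNKS == 1 and NCHUNKS == 63.
 */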

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link + flags */
};
#define HANDLE_FLAG_MASK	(0x03)

/**
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the location of the middle buddy in chunks
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver back pointer
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
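
/*
 * E.g. with 64-byte chunks, size_to_chunks(300) == (300 + 63) >> 6 == 5:
 * allocation sizes are rounded up to a whole number of chunks.
 */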

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots;

	slots = kmem_cache_alloc(pool->c_handle,
				 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

	if (slots) {
		memset(slots->slot, 0, sizeof(slots->slot));
		slots->pool = (unsigned long)pool;
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}
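
/*
 * Note: handle_to_slots() relies on struct z3fold_buddy_slots being
 * allocated from a kmem cache with SLOTS_ALIGN alignment (see
 * z3fold_create_pool()), so clearing the low six bits of the address of
 * any slot[] entry recovers the containing structure: e.g. a handle of
 * 0x...1c8 maps back to the slots structure at 0x...1c0.
 */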

static inline void free_handle(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	WARN_ON(*(unsigned long *)handle == 0);
	*(unsigned long *)handle = 0;
	slots = handle_to_slots(handle);
	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		kmem_cache_free(pool->c_handle, slots);
	}
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp);

	if (!slots)
		return NULL;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	struct z3fold_buddy_slots *slots;
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	slots = zhdr->slots;
	slots->slot[idx] = h;
	return (unsigned long)&slots->slot[idx];
}
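
/*
 * Worked example (hypothetical values): with first_num == 1, a FIRST
 * buddy gets idx == (FIRST + 1) & BUDDY_MASK == 2, so its handle is the
 * address of slots->slot[2], and slot[2] stores the header address plus
 * the index. For a LAST buddy, the bits above BUDDY_SHIFT additionally
 * carry last_chunks, which handle_to_chunks() later extracts.
 */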

/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	unsigned long addr = h;

	if (!(addr & (1 << PAGE_HEADLESS)))
		addr = *(unsigned long *)h;

	return (struct z3fold_header *)(addr & PAGE_MASK);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	unsigned long addr = *(unsigned long *)handle;

	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	unsigned long addr;

	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
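
/*
 * E.g. with TOTAL_CHUNKS == 64, ZHDR_CHUNKS == 1, first_chunks == 0,
 * start_middle == 20, middle_chunks == 10 and last_chunks == 4: only a
 * first buddy could still be placed, and it has to fit below the middle
 * object, so nfree_before == 19, nfree_after == 0 and the function
 * returns 19.
 */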

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
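
/*
 * E.g. when the middle buddy is the only one in use, z3fold_compact_page()
 * moves it down to offset ZHDR_CHUNKS and it becomes the first buddy;
 * incrementing first_num makes the existing handle, encoded for MIDDLE,
 * decode as FIRST from then on (see handle_to_buddy()).
 */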

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (unlikely(PageIsolated(page) ||
		     test_bit(PAGE_STALE, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);
	z3fold_unregister_migration(pool);
	destroy_workqueue(pool->release_wq);
	destroy_workqueue(pool->compact_wq);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the size exceeds what fits in a
 * z3fold page, or -ENOMEM if the pool was unable to allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size)
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} else {
		if (trylock_page(page)) {
			__SetPageMovable(page, pool->inode->i_mapping);
			unlock_page(page);
		}
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
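
/*
 * Illustrative call sequence (hypothetical sizes), as reached through
 * z3fold_zpool_malloc() below:
 *
 *	unsigned long h1, h2;
 *
 *	z3fold_alloc(pool, 1000, GFP_KERNEL, &h1);
 *	z3fold_alloc(pool, 2000, GFP_KERNEL, &h2);
 *
 * The first call typically claims the FIRST buddy of a page; whether the
 * second lands as the LAST buddy of the same page depends on what the
 * per-cpu unbuddied lists contain at that moment.
 */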

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED bit being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	z3fold_page_lock(zhdr);
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		z3fold_page_unlock(zhdr);
		return;
	}

	free_handle(handle);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (unlikely(PageIsolated(page)) ||
	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle cannot be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user-defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED bit being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private))
				continue;

			if (unlikely(PageIsolated(page)))
				continue;
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			zhdr = page_address(page);
			if (!z3fold_page_trylock(zhdr)) {
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked, so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
	z3fold_page_unlock(zhdr);
out:
	return addr;
}
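
/*
 * E.g. for a LAST buddy of 4 chunks on a 4K page, handle_to_chunks()
 * returns 4 and the mapping starts PAGE_SIZE - (4 << CHUNK_SHIFT) ==
 * 4096 - 256 == 3840 bytes into the page: last buddies are packed
 * against the end of the page.
 */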

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	pool = zhdr_to_pool(zhdr);

	if (zhdr->mapped_count == 0) {
		kref_get(&zhdr->refcount);
		if (!list_empty(&zhdr->buddy))
			list_del_init(&zhdr->buddy);
		spin_lock(&pool->lock);
		if (!list_empty(&page->lru))
			list_del(&page->lru);
		spin_unlock(&pool->lock);
		z3fold_page_unlock(zhdr);
		return true;
	}
out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct address_space *mapping,
			       struct page *newpage, struct page *page,
			       enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr))
		return -EAGAIN;
	if (zhdr->mapped_count != 0) {
		z3fold_page_unlock(zhdr);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);
	ClearPagePrivate(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	page_mapcount_reset(page);
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	z3fold_page_unlock(zhdr);
}

static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};
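
/*
 * A client selects this backend by its type string, e.g. (sketch):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "zswap", GFP_KERNEL,
 *					     &ops);
 *
 * zpool matches "z3fold" against z3fold_zpool_driver.type; if the module
 * is not loaded yet, the MODULE_ALIAS below allows it to be pulled in by
 * request_module().
 */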

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	int ret;

	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");