// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/dcache.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * in the beginning of an allocated page are occupied by z3fold header, so
 * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * also be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)
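
/*
 * A worked example, assuming the common 4 KiB page size (PAGE_SHIFT == 12):
 * with NCHUNKS_ORDER == 6, CHUNK_SHIFT == 12 - 6 == 6, so CHUNK_SIZE is
 * 64 bytes and TOTAL_CHUNKS is 64. ZHDR_SIZE_ALIGNED rounds the header up
 * to a whole number of chunks, and NCHUNKS is whatever remains of the page
 * for objects (63 or 62 chunks, as the comment above explains).
 */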

/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link + flags */
};
#define HANDLE_FLAG_MASK	(0x03)

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the starting chunk of the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
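
/*
 * For example (with 64-byte chunks, i.e. CHUNK_SHIFT == 6),
 * size_to_chunks(100) evaluates to (100 + 63) >> 6 == 2: a 100-byte
 * allocation is rounded up to two whole chunks.
 */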

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool)
{
	struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle,
							    GFP_KERNEL);

	if (slots) {
		memset(slots->slot, 0, sizeof(slots->slot));
		slots->pool = (unsigned long)pool;
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}
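
/*
 * Note: the mask in handle_to_slots() works because buddy slot structures
 * come from a kmem cache created with SLOTS_ALIGN (64-byte) alignment (see
 * z3fold_create_pool() below), so clearing the low six bits of a handle,
 * which points into the slot array, recovers the containing
 * z3fold_buddy_slots.
 */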

static inline void free_handle(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	WARN_ON(*(unsigned long *)handle == 0);
	*(unsigned long *)handle = 0;
	slots = handle_to_slots(handle);
	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		kmem_cache_free(pool->c_handle, slots);
	}
}

static struct dentry *z3fold_do_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	static const struct dentry_operations ops = {
		.d_dname = simple_dname,
	};

	return mount_pseudo(fs_type, "z3fold:", NULL, &ops, 0x33);
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.mount		= z3fold_do_mount,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots = alloc_slots(pool);

	if (!slots)
		return NULL;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->slots = slots;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	struct z3fold_buddy_slots *slots;
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	slots = zhdr->slots;
	slots->slot[idx] = h;
	return (unsigned long)&slots->slot[idx];
}
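
/*
 * A sketch of the resulting encoding, derived from encode_handle() above:
 * zhdr is page-aligned, so the value written into the slot is the page
 * address with the buddy index in its low BUDDY_MASK bits and, for the
 * LAST buddy only, last_chunks stored in the bits above BUDDY_SHIFT.
 * The handle returned to the caller is the address of that slot, not the
 * encoded value itself.
 */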

/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	unsigned long addr = h;

	if (!(addr & (1 << PAGE_HEADLESS)))
		addr = *(unsigned long *)h;

	return (struct z3fold_header *)(addr & PAGE_MASK);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	unsigned long addr = *(unsigned long *)handle;

	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	unsigned long addr;

	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return slots_to_pool(zhdr->slots);
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
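
/*
 * Example with illustrative numbers: for TOTAL_CHUNKS == 64 and a page
 * holding only a middle object of 10 chunks at start_middle == 20,
 * nfree_before is 20 - ZHDR_CHUNKS and nfree_after is 64 - (20 + 10) == 34,
 * and the bigger of the two regions, 34 chunks, is reported.
 */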

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
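
/*
 * Compaction example (illustrative numbers, assuming ZHDR_CHUNKS == 1):
 * with a 5-chunk first buddy, a free last buddy and a middle object at
 * start_middle == 12, the gap is 12 - (5 + 1) == 6 chunks, which is at
 * least BIG_CHUNK_GAP, so the middle object is moved down to chunk 6 and
 * the free space is consolidated after it.
 */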

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (unlikely(PageIsolated(page) ||
		     test_bit(PAGE_STALE, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);
	z3fold_unregister_migration(pool);
	destroy_workqueue(pool->release_wq);
	destroy_workqueue(pool->compact_wq);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough
 * to satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, pool);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	__SetPageMovable(page, pool->inode->i_mapping);
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PG_reclaim flag being set, this function
 * only sets the first|last_chunks to 0.  The page is actually freed
 * once all buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	z3fold_page_lock(zhdr);
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		z3fold_page_unlock(zhdr);
		return;
	}

	free_handle(handle);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (unlikely(PageIsolated(page)) ||
	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PG_reclaim flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private))
				continue;

			if (unlikely(PageIsolated(page)))
				continue;
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			zhdr = page_address(page);
			if (!z3fold_page_trylock(zhdr)) {
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
	z3fold_page_unlock(zhdr);
out:
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	pool = zhdr_to_pool(zhdr);

	if (zhdr->mapped_count == 0) {
		kref_get(&zhdr->refcount);
		if (!list_empty(&zhdr->buddy))
			list_del_init(&zhdr->buddy);
		spin_lock(&pool->lock);
		if (!list_empty(&page->lru))
			list_del(&page->lru);
		spin_unlock(&pool->lock);
		z3fold_page_unlock(zhdr);
		return true;
	}
out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
			       struct page *page, enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!trylock_page(page))
		return -EAGAIN;

	if (!z3fold_page_trylock(zhdr)) {
		unlock_page(page);
		return -EAGAIN;
	}
	if (zhdr->mapped_count != 0) {
		z3fold_page_unlock(zhdr);
		unlock_page(page);
		return -EBUSY;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);
	ClearPagePrivate(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	page_mapcount_reset(page);
	unlock_page(page);
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	z3fold_page_unlock(zhdr);
}

static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};
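
/*
 * Usage sketch (illustrative only; 'my_zpool_ops', 'src' and 'len' are
 * caller-provided and hypothetical): a zpool client such as zswap would
 * reach this driver roughly as follows, assuming the zpool API of this
 * kernel generation:
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "myname",
 *					     GFP_KERNEL, &my_zpool_ops);
 *	unsigned long handle;
 *
 *	if (zpool_malloc(zp, len, GFP_KERNEL, &handle) == 0) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *
 *		memcpy(dst, src, len);
 *		zpool_unmap_handle(zp, handle);
 *		zpool_free(zp, handle);
 *	}
 *	zpool_destroy_pool(zp);
 */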

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	int ret;

	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");