// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/kmemleak.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header, so
 * NCHUNKS comes to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), which is the
 * maximum number of free chunks in a z3fold page; there will likewise be
 * 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		(TOTAL_CHUNKS - ZHDR_CHUNKS)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)
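
/*
 * Worked example (illustrative): with PAGE_SIZE == 4096, i.e. PAGE_SHIFT ==
 * 12, CHUNK_SHIFT == 6, so CHUNK_SIZE == 64 bytes and TOTAL_CHUNKS == 64.
 * NCHUNKS is then TOTAL_CHUNKS minus the chunks taken by the header,
 * i.e. the 63 (or 62) quoted in the comment above.
 */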

/*****************
 * Structures
 *****************/
struct z3fold_pool;

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link */
	rwlock_t lock;
};
#define HANDLE_FLAG_MASK	(0x03)

/**
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 * @foreign_handles:	number of handles to this page that live in other
 *			pages' slots
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
	unsigned short foreign_handles:2;
};
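
/*
 * Illustrative z3fold page layout when all three buddies are in use (a
 * sketch, not to scale; positions are in chunks):
 *
 *	+------+-------+......+--------+......+------+
 *	| zhdr | FIRST | free | MIDDLE | free | LAST |
 *	+------+-------+......+--------+......+------+
 *	0      ZHDR_CHUNKS    start_middle           TOTAL_CHUNKS
 *
 * FIRST grows upwards from just past the header, LAST is filled downwards
 * from the end of the page, and MIDDLE floats in between at start_middle
 * (see z3fold_compact_page() below for how it gets moved around).
 */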

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
	PAGE_MIGRATED, /* page is migrated and soon to be released */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
	HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
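
/*
 * For example (illustrative, with CHUNK_SIZE == 64): size_to_chunks(100)
 * returns (100 + 63) >> 6 == 2, i.e. allocation sizes are rounded up to a
 * whole number of chunks.
 */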

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
						     gfp_t gfp)
{
	struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle,
							     gfp);

	if (slots) {
		/* It will be freed separately in free_handle(). */
		kmemleak_not_leak(slots);
		slots->pool = (unsigned long)pool;
		rwlock_init(&slots->lock);
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int locked = 0;

	if (!(handle & (1 << PAGE_HEADLESS))) {
		slots = handle_to_slots(handle);
		do {
			unsigned long addr;

			read_lock(&slots->lock);
			addr = *(unsigned long *)handle;
			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
			locked = z3fold_page_trylock(zhdr);
			read_unlock(&slots->lock);
			if (locked) {
				struct page *page = virt_to_page(zhdr);

				if (!test_bit(PAGE_MIGRATED, &page->private))
					break;
				z3fold_page_unlock(zhdr);
			}
			cpu_relax();
		} while (true);
	} else {
		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
	}

	return zhdr;
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (!test_bit(PAGE_HEADLESS, &page->private))
		z3fold_page_unlock(zhdr);
}

static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;

	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
		write_unlock(&slots->lock);
		return; /* simple case, nothing else to do */
	}

	if (zhdr->slots != slots)
		zhdr->foreign_handles--;

	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	write_unlock(&slots->lock);

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		if (zhdr->slots == slots)
			zhdr->slots = NULL;
		kmem_cache_free(pool->c_handle, slots);
	}
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	clear_bit(PAGE_MIGRATED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	memset(zhdr, 0, sizeof(*zhdr));
	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	__free_page(page);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page.
 * Pool lock should be held as this function accesses first_num.
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	write_lock(&slots->lock);
	slots->slot[idx] = h;
	write_unlock(&slots->lock);
	return (unsigned long)&slots->slot[idx];
}

static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}
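
/*
 * Illustrative summary of the handle layout implied above (a sketch, not
 * part of the original comments): a non-headless handle is the address of a
 * slot inside a SLOTS_ALIGN-aligned z3fold_buddy_slots. The slot itself
 * stores the page-aligned zhdr address with the buddy index in the low
 * BUDDY_MASK bits and, for LAST buddies only, last_chunks shifted left by
 * BUDDY_SHIFT. handle_to_slots() simply masks a handle down to its slots
 * structure, while handle_to_chunks() and handle_to_buddy() below unpack
 * the stored value.
 */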

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}
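
/*
 * Worked example of the wrap-around noted above (illustrative): with
 * first_num == 3, encoding LAST (bud == 3) gives idx = (3 + 3) & 0x3 == 2,
 * and decoding yields (2 - 3) & 0x3 == 3 == LAST again, so the modular
 * arithmetic round-trips correctly.
 */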

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	spin_unlock(&pool->lock);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);

	atomic64_dec(&pool->pages_nr);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
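
/*
 * Worked example (illustrative, with TOTAL_CHUNKS == 64): for a page with
 * first_chunks == 10, middle_chunks == 5 at start_middle == 20 and
 * last_chunks == 0, nfree_before is 0 (the first buddy is in use) and
 * nfree_after is 64 - (20 + 5) == 39, so num_free_chunks() reports 39 even
 * though the gap between the first and middle buddies also holds some free
 * chunks -- only the biggest contiguous region counts.
 */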

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied;
		int freechunks = num_free_chunks(zhdr);

		migrate_disable();
		unbuddied = this_cpu_ptr(pool->unbuddied);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		migrate_enable();
	}
}

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
	enum buddy bud = HEADLESS;

	if (zhdr->middle_chunks) {
		if (!zhdr->first_chunks &&
		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
	} else {
		if (!zhdr->first_chunks)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
		else
			bud = MIDDLE;
	}

	return bud;
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

static inline bool buddy_single(struct z3fold_header *zhdr)
{
	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
			(zhdr->first_chunks && zhdr->last_chunks) ||
			(zhdr->middle_chunks && zhdr->last_chunks));
}

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	void *p = zhdr;
	unsigned long old_handle = 0;
	size_t sz = 0;
	struct z3fold_header *new_zhdr = NULL;
	int first_idx = __idx(zhdr, FIRST);
	int middle_idx = __idx(zhdr, MIDDLE);
	int last_idx = __idx(zhdr, LAST);
	unsigned short *moved_chunks = NULL;

	/*
	 * No need to protect slots here -- all the slots are "local" and
	 * the page lock is already taken
	 */
	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
		p += ZHDR_SIZE_ALIGNED;
		sz = zhdr->first_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
		moved_chunks = &zhdr->first_chunks;
	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
		p += zhdr->start_middle << CHUNK_SHIFT;
		sz = zhdr->middle_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
		moved_chunks = &zhdr->middle_chunks;
	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		sz = zhdr->last_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
		moved_chunks = &zhdr->last_chunks;
	}

	if (sz > 0) {
		enum buddy new_bud = HEADLESS;
		short chunks = size_to_chunks(sz);
		void *q;

		new_zhdr = __z3fold_alloc(pool, sz, false);
		if (!new_zhdr)
			return NULL;

		if (WARN_ON(new_zhdr == zhdr))
			goto out_fail;

		new_bud = get_free_buddy(new_zhdr, chunks);
		q = new_zhdr;
		switch (new_bud) {
		case FIRST:
			new_zhdr->first_chunks = chunks;
			q += ZHDR_SIZE_ALIGNED;
			break;
		case MIDDLE:
			new_zhdr->middle_chunks = chunks;
			new_zhdr->start_middle =
				new_zhdr->first_chunks + ZHDR_CHUNKS;
			q += new_zhdr->start_middle << CHUNK_SHIFT;
			break;
		case LAST:
			new_zhdr->last_chunks = chunks;
			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
			break;
		default:
			goto out_fail;
		}
		new_zhdr->foreign_handles++;
		memcpy(q, p, sz);
		write_lock(&zhdr->slots->lock);
		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
			__idx(new_zhdr, new_bud);
		if (new_bud == LAST)
			*(unsigned long *)old_handle |=
				(new_zhdr->last_chunks << BUDDY_SHIFT);
		write_unlock(&zhdr->slots->lock);
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);

		*moved_chunks = 0;
	}

	return new_zhdr;

out_fail:
	if (new_zhdr && !kref_put(&new_zhdr->refcount, release_z3fold_page_locked)) {
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);
	}
	return NULL;
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
		return;

	if (test_bit(PAGE_STALE, &page->private) ||
	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}

	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
		if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	migrate_disable();
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = this_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private) ||
		    test_bit(PAGE_CLAIMED, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	migrate_enable();

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private) ||
			    test_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	if (zhdr && !zhdr->slots) {
		zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
		if (!zhdr->slots)
			goto out_fail;
	}
	return zhdr;

out_fail:
	if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		add_to_unbuddied(pool, zhdr);
		z3fold_page_unlock(zhdr);
	}
	return NULL;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
					 __alignof__(struct list_head));
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	INIT_WORK(&pool->work, free_pages_work);
	return pool;

out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */
	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	free_percpu(pool->unbuddied);
	kfree(pool);
}

static const struct movable_operations z3fold_mops;

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			bud = get_free_buddy(zhdr, chunks);
			if (bud == HEADLESS) {
				if (!kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, &z3fold_mops);
		unlock_page(page);
	} else {
		WARN_ON(!trylock_page(page));
		__SetPageMovable(page, &z3fold_mops);
		unlock_page(page);
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|middle|last_chunks to 0. The page is actually
 * released once all buddies are evicted.
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;
	bool page_claimed;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);
	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!page_claimed) {
			put_z3fold_header(zhdr);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		put_z3fold_header(zhdr);
		return;
	}

	if (!page_claimed)
		free_handle(handle, zhdr);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list))
		return;
	if (page_claimed) {
		/* the page has not been claimed by us */
		put_z3fold_header(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		clear_bit(PAGE_CLAIMED, &page->private);
		put_z3fold_header(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		clear_bit(PAGE_CLAIMED, &page->private);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	clear_bit(PAGE_CLAIMED, &page->private);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	put_z3fold_header(zhdr);
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
out:
	put_z3fold_header(zhdr);
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	put_z3fold_header(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
		goto out;

	if (test_and_set_bit(PAGE_CLAIMED, &page->private))
		goto out;
	pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	kref_get(&zhdr->refcount);
	z3fold_page_unlock(zhdr);
	return true;

out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr))
		return -EAGAIN;
	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
		clear_bit(PAGE_CLAIMED, &page->private);
		z3fold_page_unlock(zhdr);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	set_bit(PAGE_MIGRATED, &page->private);
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	__ClearPageMovable(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	__SetPageMovable(newpage, &z3fold_mops);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	/* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
	page->private = 0;
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
		return;
	if (list_empty(&zhdr->buddy))
		add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static const struct movable_operations z3fold_mops = {
	.isolate_page = z3fold_page_isolate,
	.migrate_page = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static void *z3fold_zpool_create(const char *name, gfp_t gfp)
{
	return z3fold_create_pool(name, gfp);
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.sleep_mapped = true,
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};
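
/*
 * Minimal usage sketch (illustrative only, not part of this driver): since
 * z3fold exports no API of its own, callers such as zswap go through the
 * generic zpool layer the driver above registers with. Assuming the standard
 * zpool API (the pool name "demo" and the src_buf/len variables are
 * hypothetical):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "demo", GFP_KERNEL);
 *	unsigned long handle;
 *
 *	if (!zp)
 *		return -ENOMEM;
 *	if (!zpool_malloc(zp, len, GFP_KERNEL, &handle)) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *
 *		memcpy(dst, src_buf, len);
 *		zpool_unmap_handle(zp, handle);
 *		zpool_free(zp, handle);
 *	}
 *	zpool_destroy_pool(zp);
 */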

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	/*
	 * Make sure the z3fold header is not larger than the page size and
	 * there is remaining space for its buddies.
	 */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");