// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */
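/*
 * A minimal usage sketch through the zpool facade (illustrative only, not
 * part of this file; it assumes the zpool API of the same tree, and "test",
 * src and len are hypothetical):
 *
 *	unsigned long handle;
 *	struct zpool *zp = zpool_create_pool("z3fold", "test", GFP_KERNEL);
 *
 *	if (!zp)
 *		return;
 *	if (!zpool_malloc(zp, len, GFP_KERNEL, &handle)) {
 *		void *obj = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *
 *		memcpy(obj, src, len);
 *		zpool_unmap_handle(zp, handle);
 *		zpool_free(zp, handle);
 *	}
 *	zpool_destroy_pool(zp);
 */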
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/kmemleak.h>
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; accordingly,
 * there will be 63 (or 62) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		(TOTAL_CHUNKS - ZHDR_CHUNKS)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)
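/*
 * Worked example (illustrative; assumes a 4 KiB page, i.e. PAGE_SHIFT == 12):
 * CHUNK_SHIFT = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes and TOTAL_CHUNKS = 64.
 * With a header that fits in one chunk, ZHDR_CHUNKS = 1 and NCHUNKS = 63.
 */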
struct z3fold_pool;

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link */
	rwlock_t lock;
};
#define HANDLE_FLAG_MASK	(0x03)
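/*
 * A worked example of the handle layout (illustrative): slots structures
 * are SLOTS_ALIGN (64-byte) aligned, so for a slots structure at address A,
 * the handle for slot index i is simply A + i * sizeof(unsigned long), and
 * masking a handle with ~(SLOTS_ALIGN - 1) recovers A (see
 * handle_to_slots() below).
 */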
/**
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the starting chunk of the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 * @foreign_handles:	the number of foreign handles pointing into this page
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
	unsigned short foreign_handles:2;
};
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
};
/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
	PAGE_MIGRATED, /* page is migrated and soon to be released */
};
/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
	HANDLES_NOFREE = 0,
};
/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);
/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
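/*
 * Example (assuming 64-byte chunks): size_to_chunks(100) rounds up to
 * (100 + 63) >> 6 == 2 chunks, i.e. a 100-byte object occupies 128 bytes.
 */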
static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
						     gfp_t gfp)
{
	struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle,
							     gfp);

	if (slots) {
		/* It will be freed separately in free_handle(). */
		kmemleak_not_leak(slots);
		slots->pool = (unsigned long)pool;
		rwlock_init(&slots->lock);
	}

	return slots;
}
static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}
/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}
/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int locked = 0;

	if (!(handle & (1 << PAGE_HEADLESS))) {
		slots = handle_to_slots(handle);
		do {
			unsigned long addr;

			read_lock(&slots->lock);
			addr = *(unsigned long *)handle;
			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
			locked = z3fold_page_trylock(zhdr);
			read_unlock(&slots->lock);
			if (locked) {
				struct page *page = virt_to_page(zhdr);

				if (!test_bit(PAGE_MIGRATED, &page->private))
					break;
				z3fold_page_unlock(zhdr);
			}
			cpu_relax();
		} while (true);
	} else {
		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
	}

	return zhdr;
}
static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (!test_bit(PAGE_HEADLESS, &page->private))
		z3fold_page_unlock(zhdr);
}
static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;

	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
		write_unlock(&slots->lock);
		return; /* simple case, nothing else to do */
	}

	if (zhdr->slots != slots)
		zhdr->foreign_handles--;

	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	write_unlock(&slots->lock);

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		if (zhdr->slots == slots)
			zhdr->slots = NULL;
		kmem_cache_free(pool->c_handle, slots);
	}
}
/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	clear_bit(PAGE_MIGRATED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	memset(zhdr, 0, sizeof(*zhdr));
	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}
/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	__free_page(page);
}
/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}
/*
 * Encodes the handle of a particular buddy within a z3fold page.
 * Pool lock should be held as this function accesses first_num.
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	write_lock(&slots->lock);
	slots->slot[idx] = h;
	write_unlock(&slots->lock);
	return (unsigned long)&slots->slot[idx];
}

static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}
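/*
 * Worked example (illustrative): for a LAST buddy with first_num == 1 and
 * last_chunks == 4, idx = (LAST + 1) & 3 == 0 and the stored value is the
 * page address plus idx, with (4 << BUDDY_SHIFT) == 16 OR-ed into the low
 * bits; the handle returned to the caller is the address of slot[0] itself.
 */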
/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}
/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}
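/*
 * Example of the wrap-around noted above: with first_num == 2, LAST (3)
 * encodes to idx = (3 + 2) & 3 == 1, which is smaller than first_num;
 * decoding still yields (1 - 2) & 3 == 3 == LAST, so the unsigned
 * wrap-around is harmless.
 */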
static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}
static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	spin_unlock(&pool->lock);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);

	atomic64_dec(&pool->pages_nr);
}
static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static inline int put_z3fold_locked(struct z3fold_header *zhdr)
{
	return kref_put(&zhdr->refcount, release_z3fold_page_locked);
}

static inline int put_z3fold_locked_list(struct z3fold_header *zhdr)
{
	return kref_put(&zhdr->refcount, release_z3fold_page_locked_list);
}
static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}
/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;

	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;

	return nfree;
}
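/*
 * Example (assuming TOTAL_CHUNKS == 64, ZHDR_CHUNKS == 1): a page with a
 * free first buddy and a 10-chunk middle object at start_middle == 20 has
 * nfree_before = 20 - 1 = 19 and, if the last buddy is also free,
 * nfree_after = 64 - (20 + 10) = 34, so num_free_chunks() returns 34.
 */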
/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied;
		int freechunks = num_free_chunks(zhdr);

		migrate_disable();
		unbuddied = this_cpu_ptr(pool->unbuddied);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		migrate_enable();
	}
}
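/*
 * The unbuddied lists are indexed by free-chunk count: the page from the
 * example above would sit on unbuddied[34], and an allocation needing n
 * chunks scans lists n, n + 1, ... (see for_each_unbuddied_list()), so any
 * list it hits is guaranteed to hold pages with enough room.
 */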
static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
	enum buddy bud = HEADLESS;

	if (zhdr->middle_chunks) {
		if (!zhdr->first_chunks &&
		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
	} else {
		if (!zhdr->first_chunks)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
		else
			bud = MIDDLE;
	}

	return bud;
}
static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;

	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}
static inline bool buddy_single(struct z3fold_header *zhdr)
{
	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
			(zhdr->first_chunks && zhdr->last_chunks) ||
			(zhdr->middle_chunks && zhdr->last_chunks));
}
static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	void *p = zhdr;
	unsigned long old_handle = 0;
	size_t sz = 0;
	struct z3fold_header *new_zhdr = NULL;
	int first_idx = __idx(zhdr, FIRST);
	int middle_idx = __idx(zhdr, MIDDLE);
	int last_idx = __idx(zhdr, LAST);
	unsigned short *moved_chunks = NULL;

	/*
	 * No need to protect slots here -- all the slots are "local" and
	 * the page lock is already taken
	 */
	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
		p += ZHDR_SIZE_ALIGNED;
		sz = zhdr->first_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
		moved_chunks = &zhdr->first_chunks;
	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
		p += zhdr->start_middle << CHUNK_SHIFT;
		sz = zhdr->middle_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
		moved_chunks = &zhdr->middle_chunks;
	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		sz = zhdr->last_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
		moved_chunks = &zhdr->last_chunks;
	}

	if (sz > 0) {
		enum buddy new_bud = HEADLESS;
		short chunks = size_to_chunks(sz);
		void *q;

		new_zhdr = __z3fold_alloc(pool, sz, false);
		if (!new_zhdr)
			return NULL;

		if (WARN_ON(new_zhdr == zhdr))
			goto out_fail;

		new_bud = get_free_buddy(new_zhdr, chunks);
		q = new_zhdr;
		switch (new_bud) {
		case FIRST:
			new_zhdr->first_chunks = chunks;
			q += ZHDR_SIZE_ALIGNED;
			break;
		case MIDDLE:
			new_zhdr->middle_chunks = chunks;
			new_zhdr->start_middle =
				new_zhdr->first_chunks + ZHDR_CHUNKS;
			q += new_zhdr->start_middle << CHUNK_SHIFT;
			break;
		case LAST:
			new_zhdr->last_chunks = chunks;
			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
			break;
		default:
			goto out_fail;
		}
		new_zhdr->foreign_handles++;
		memcpy(q, p, sz);
		write_lock(&zhdr->slots->lock);
		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
			__idx(new_zhdr, new_bud);
		if (new_bud == LAST)
			*(unsigned long *)old_handle |=
					(new_zhdr->last_chunks << BUDDY_SHIFT);
		write_unlock(&zhdr->slots->lock);
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);

		*moved_chunks = 0;
	}

	return new_zhdr;

out_fail:
	if (new_zhdr && !put_z3fold_locked(new_zhdr)) {
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);
	}
	return NULL;
}
#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		/* new_start: right after 1st chunk */
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
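/*
 * Example of the BIG_CHUNK_GAP heuristic: with first_chunks == 5,
 * ZHDR_CHUNKS == 1 and start_middle == 12, the gap before the middle buddy
 * is 12 - (5 + 1) = 6 chunks; since 6 >= BIG_CHUNK_GAP, the middle buddy is
 * moved down to chunk 6, leaving one contiguous free region at the end.
 */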
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (put_z3fold_locked(zhdr))
		return;

	if (test_bit(PAGE_STALE, &page->private) ||
	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}

	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
		if (!put_z3fold_locked(zhdr)) {
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}
static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}
/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	migrate_disable();
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = this_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
						struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private) ||
		    test_bit(PAGE_CLAIMED, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	migrate_enable();

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private) ||
			    test_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	if (zhdr && !zhdr->slots) {
		zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
		if (!zhdr->slots)
			goto out_fail;
	}
	return zhdr;

out_fail:
	if (!put_z3fold_locked(zhdr)) {
		add_to_unbuddied(pool, zhdr);
		z3fold_page_unlock(zhdr);
	}
	return NULL;
}
/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
					 __alignof__(struct list_head));
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	INIT_WORK(&pool->work, free_pages_work);
	return pool;

out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}
/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool: the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */
	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	free_percpu(pool->unbuddied);
	kfree(pool);
}
static const struct movable_operations z3fold_mops;
/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			bud = get_free_buddy(zhdr, chunks);
			if (bud == HEADLESS) {
				if (!put_z3fold_locked(zhdr))
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, &z3fold_mops);
		unlock_page(page);
	} else {
		WARN_ON(!trylock_page(page));
		__SetPageMovable(page, &z3fold_mops);
		unlock_page(page);
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
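/*
 * Sizing example (assuming 4 KiB pages, a one-chunk header and 64-byte
 * chunks): requests larger than 4096 - 64 - 64 = 3968 bytes leave no room
 * for a header plus a second buddy, so they take the HEADLESS path above
 * and get a whole, unindexed page to themselves.
 */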
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|middle|last_chunks to 0. The page is actually freed
 * once all buddies are evicted.
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;
	bool page_claimed;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);
	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!page_claimed) {
			put_z3fold_header(zhdr);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		put_z3fold_header(zhdr);
		return;
	}

	if (!page_claimed)
		free_handle(handle, zhdr);
	if (put_z3fold_locked_list(zhdr))
		return;
	if (page_claimed) {
		/* the page has not been claimed by us */
		put_z3fold_header(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		clear_bit(PAGE_CLAIMED, &page->private);
		put_z3fold_header(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		clear_bit(PAGE_CLAIMED, &page->private);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	clear_bit(PAGE_CLAIMED, &page->private);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	put_z3fold_header(zhdr);
}
/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
out:
	put_z3fold_header(zhdr);
	return addr;
}
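/*
 * Mapping arithmetic example (illustrative, assuming 64-byte chunks): a
 * LAST buddy of 4 chunks maps at page + PAGE_SIZE - (4 << 6), i.e. the
 * final 256 bytes of the page, while a FIRST buddy always starts right
 * after the header at page + ZHDR_SIZE_ALIGNED.
 */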
/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	put_z3fold_header(zhdr);
}
/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}
static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
		goto out;

	if (test_and_set_bit(PAGE_CLAIMED, &page->private))
		goto out;
	pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	kref_get(&zhdr->refcount);
	z3fold_page_unlock(zhdr);
	return true;

out:
	z3fold_page_unlock(zhdr);
	return false;
}
static int z3fold_page_migrate(struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr))
		return -EAGAIN;
	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
		clear_bit(PAGE_CLAIMED, &page->private);
		z3fold_page_unlock(zhdr);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	set_bit(PAGE_MIGRATED, &page->private);
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	__ClearPageMovable(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	__SetPageMovable(newpage, &z3fold_mops);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	/* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
	page->private = 0;
	put_page(page);
	return 0;
}
static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (put_z3fold_locked(zhdr))
		return;
	if (list_empty(&zhdr->buddy))
		add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}
static const struct movable_operations z3fold_mops = {
	.isolate_page = z3fold_page_isolate,
	.migrate_page = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};
static void *z3fold_zpool_create(const char *name, gfp_t gfp)
{
	return z3fold_create_pool(name, gfp);
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}
static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.sleep_mapped = true,
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};
MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	/*
	 * Make sure the z3fold header is not larger than the page size and
	 * that there is remaining space for the buddies.
	 */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");