// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool. This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012 Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate. However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

/* Enable/disable zswap */
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
                                   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
        .set =          zswap_enabled_param_set,
        .get =          param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
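
/*
 * All of the zswap module parameters in this section are created with
 * mode 0644, so they can also be inspected and changed at runtime via
 * sysfs, e.g. (illustrative; the named algorithm must be available):
 *
 *      echo lz4 > /sys/module/zswap/parameters/compressor
 */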

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
                                      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
        .set =          zswap_compressor_param_set,
        .get =          param_get_charp,
        .free =         param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
                &zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
        .set =          zswap_zpool_param_set,
        .get =          param_get_charp,
        .free =         param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
                   uint, 0644);

/*
 * Enable/disable handling same-value filled pages (enabled by default).
 * If disabled every page is considered non-same-value filled.
 */
static bool zswap_same_filled_pages_enabled = true;
module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
                   bool, 0644);

/* Enable/disable handling non-same-value filled pages (enabled by default) */
static bool zswap_non_same_filled_pages_enabled = true;
module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
                   bool, 0644);

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
        struct crypto_acomp *acomp;
        struct acomp_req *req;
        struct crypto_wait wait;
        u8 *dstmem;
        struct mutex *mutex;
};

struct zswap_pool {
        struct zpool *zpool;
        struct crypto_acomp_ctx __percpu *acomp_ctx;
        struct kref kref;
        struct list_head list;
        struct work_struct release_work;
        struct work_struct shrink_work;
        struct hlist_node node;
        char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * offset - the swap offset for the entry. Index into the red-black tree.
 * refcount - the number of outstanding references to the entry. This is
 *            needed to protect against premature freeing of the entry by
 *            concurrent calls to load, invalidate, and writeback. The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount. Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data. Needed during
 *          decompression. For a same-value filled page length is 0.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - value of the same-value filled pages which have same content
 */
struct zswap_entry {
        struct rb_node rbnode;
        pgoff_t offset;
        int refcount;
        unsigned int length;
        struct zswap_pool *pool;
        union {
                unsigned long handle;
                unsigned long value;
        };
};

struct zswap_header {
        swp_entry_t swpentry;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
        struct rb_root rbroot;
        spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

/* used by param callback function */
static bool zswap_init_started;

/* fatal error during init */
static bool zswap_init_failed;

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)                                \
        pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,         \
                 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static const struct zpool_ops zswap_zpool_ops = {
        .evict = zswap_writeback_entry
};

static bool zswap_is_full(void)
{
        return totalram_pages() * zswap_max_pool_percent / 100 <
                        DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
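
/*
 * Worked example: with 8 GiB of RAM and the default max_pool_percent of
 * 20, zswap_is_full() reports full once the compressed objects occupy
 * more than roughly 1.6 GiB (20% of totalram_pages()).
 */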

static bool zswap_can_accept(void)
{
        return totalram_pages() * zswap_accept_thr_percent / 100 *
                                zswap_max_pool_percent / 100 >
                        DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
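
/*
 * Worked example: with the defaults (max_pool_percent = 20,
 * accept_thr_percent = 90), once the limit has been hit new stores are
 * refused until the pool shrinks back below 18% of RAM (90% of the 20%
 * cap). This provides hysteresis rather than bouncing off the limit.
 */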

static void zswap_update_total_size(void)
{
        struct zswap_pool *pool;
        u64 total = 0;

        rcu_read_lock();

        list_for_each_entry_rcu(pool, &zswap_pools, list)
                total += zpool_get_total_size(pool->zpool);

        rcu_read_unlock();

        zswap_pool_total_size = total;
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static int __init zswap_entry_cache_create(void)
{
        zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
        return zswap_entry_cache == NULL;
}

static void __init zswap_entry_cache_destroy(void)
{
        kmem_cache_destroy(zswap_entry_cache);
}

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
        struct zswap_entry *entry;
        entry = kmem_cache_alloc(zswap_entry_cache, gfp);
        if (!entry)
                return NULL;
        entry->refcount = 1;
        RB_CLEAR_NODE(&entry->rbnode);
        return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
        kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
        struct rb_node *node = root->rb_node;
        struct zswap_entry *entry;

        while (node) {
                entry = rb_entry(node, struct zswap_entry, rbnode);
                if (entry->offset > offset)
                        node = node->rb_left;
                else if (entry->offset < offset)
                        node = node->rb_right;
                else
                        return entry;
        }
        return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST.
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
                        struct zswap_entry **dupentry)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;
        struct zswap_entry *myentry;

        while (*link) {
                parent = *link;
                myentry = rb_entry(parent, struct zswap_entry, rbnode);
                if (myentry->offset > entry->offset)
                        link = &(*link)->rb_left;
                else if (myentry->offset < entry->offset)
                        link = &(*link)->rb_right;
                else {
                        *dupentry = myentry;
                        return -EEXIST;
                }
        }
        rb_link_node(&entry->rbnode, parent, link);
        rb_insert_color(&entry->rbnode, root);
        return 0;
}

static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
        if (!RB_EMPTY_NODE(&entry->rbnode)) {
                rb_erase(&entry->rbnode, root);
                RB_CLEAR_NODE(&entry->rbnode);
        }
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
        if (!entry->length)
                atomic_dec(&zswap_same_filled_pages);
        else {
                zpool_free(entry->pool->zpool, entry->handle);
                zswap_pool_put(entry->pool);
        }
        zswap_entry_cache_free(entry);
        atomic_dec(&zswap_stored_pages);
        zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
        entry->refcount++;
}

/* caller must hold the tree lock
* remove from the tree and free it, if nobody references the entry
*/
static void zswap_entry_put(struct zswap_tree *tree,
                        struct zswap_entry *entry)
{
        int refcount = --entry->refcount;

        BUG_ON(refcount < 0);
        if (refcount == 0) {
                zswap_rb_erase(&tree->rbroot, entry);
                zswap_free_entry(entry);
        }
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
                                pgoff_t offset)
{
        struct zswap_entry *entry;

        entry = zswap_rb_search(root, offset);
        if (entry)
                zswap_entry_get(entry);

        return entry;
}

/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);
/*
 * If users dynamically change the zpool type and compressor at runtime, i.e.
 * zswap is running, zswap can have more than one zpool on one cpu, but they
 * are sharing dstmem. So we need this mutex to be per-cpu.
 */
static DEFINE_PER_CPU(struct mutex *, zswap_mutex);

static int zswap_dstmem_prepare(unsigned int cpu)
{
        struct mutex *mutex;
        u8 *dst;

        dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
        if (!dst)
                return -ENOMEM;

        mutex = kmalloc_node(sizeof(*mutex), GFP_KERNEL, cpu_to_node(cpu));
        if (!mutex) {
                kfree(dst);
                return -ENOMEM;
        }

        mutex_init(mutex);
        per_cpu(zswap_dstmem, cpu) = dst;
        per_cpu(zswap_mutex, cpu) = mutex;
        return 0;
}

static int zswap_dstmem_dead(unsigned int cpu)
{
        struct mutex *mutex;
        u8 *dst;

        mutex = per_cpu(zswap_mutex, cpu);
        kfree(mutex);
        per_cpu(zswap_mutex, cpu) = NULL;

        dst = per_cpu(zswap_dstmem, cpu);
        kfree(dst);
        per_cpu(zswap_dstmem, cpu) = NULL;

        return 0;
}

static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
        struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
        struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
        struct crypto_acomp *acomp;
        struct acomp_req *req;

        acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
        if (IS_ERR(acomp)) {
                pr_err("could not alloc crypto acomp %s : %ld\n",
                                pool->tfm_name, PTR_ERR(acomp));
                return PTR_ERR(acomp);
        }
        acomp_ctx->acomp = acomp;

        req = acomp_request_alloc(acomp_ctx->acomp);
        if (!req) {
                pr_err("could not alloc crypto acomp_request %s\n",
                       pool->tfm_name);
                crypto_free_acomp(acomp_ctx->acomp);
                return -ENOMEM;
        }
        acomp_ctx->req = req;

        crypto_init_wait(&acomp_ctx->wait);
        /*
         * if the backend of acomp is async zip, crypto_req_done() will wakeup
         * crypto_wait_req(); if the backend of acomp is scomp, the callback
         * won't be called, crypto_wait_req() will return without blocking.
         */
        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &acomp_ctx->wait);

        acomp_ctx->mutex = per_cpu(zswap_mutex, cpu);
        acomp_ctx->dstmem = per_cpu(zswap_dstmem, cpu);

        return 0;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
        struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
        struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

        if (!IS_ERR_OR_NULL(acomp_ctx)) {
                if (!IS_ERR_OR_NULL(acomp_ctx->req))
                        acomp_request_free(acomp_ctx->req);
                if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
                        crypto_free_acomp(acomp_ctx->acomp);
        }

        return 0;
}

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
        struct zswap_pool *pool;

        pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
        WARN_ONCE(!pool && zswap_has_pool,
                  "%s: no page storage pool!\n", __func__);

        return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
        assert_spin_locked(&zswap_pools_lock);

        return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
        struct zswap_pool *pool;

        rcu_read_lock();

        pool = __zswap_pool_current();
        if (!zswap_pool_get(pool))
                pool = NULL;

        rcu_read_unlock();

        return pool;
}

static struct zswap_pool *zswap_pool_last_get(void)
{
        struct zswap_pool *pool, *last = NULL;

        rcu_read_lock();

        list_for_each_entry_rcu(pool, &zswap_pools, list)
                last = pool;
        WARN_ONCE(!last && zswap_has_pool,
                  "%s: no page storage pool!\n", __func__);
        if (!zswap_pool_get(last))
                last = NULL;

        rcu_read_unlock();

        return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
        struct zswap_pool *pool;

        assert_spin_locked(&zswap_pools_lock);

        list_for_each_entry_rcu(pool, &zswap_pools, list) {
                if (strcmp(pool->tfm_name, compressor))
                        continue;
                if (strcmp(zpool_get_type(pool->zpool), type))
                        continue;
                /* if we can't get it, it's about to be destroyed */
                if (!zswap_pool_get(pool))
                        continue;
                return pool;
        }

        return NULL;
}

static void shrink_worker(struct work_struct *w)
{
        struct zswap_pool *pool = container_of(w, typeof(*pool),
                                                shrink_work);

        if (zpool_shrink(pool->zpool, 1, NULL))
                zswap_reject_reclaim_fail++;
        zswap_pool_put(pool);
}
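
/*
 * Note: zpool_shrink() above asks the allocator to evict one stored
 * object; the allocator does so by calling back into
 * zswap_writeback_entry() via zswap_zpool_ops.evict, which decompresses
 * the page and writes it out to the backing swap device.
 */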

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
        struct zswap_pool *pool;
        char name[38]; /* 'zswap' + 32 char (max) num + \0 */
        gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
        int ret;

        if (!zswap_has_pool) {
                /* if either are unset, pool initialization failed, and we
                 * need both params to be set correctly before trying to
                 * create a pool.
                 */
                if (!strcmp(type, ZSWAP_PARAM_UNSET))
                        return NULL;
                if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
                        return NULL;
        }

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return NULL;

        /* unique name for each pool specifically required by zsmalloc */
        snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));

        pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
        if (!pool->zpool) {
                pr_err("%s zpool not available\n", type);
                goto error;
        }
        pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

        strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

        pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
        if (!pool->acomp_ctx) {
                pr_err("percpu alloc failed\n");
                goto error;
        }

        ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
                                       &pool->node);
        if (ret)
                goto error;
        pr_debug("using %s compressor\n", pool->tfm_name);

        /* being the current pool takes 1 ref; this func expects the
         * caller to always add the new pool as the current pool
         */
        kref_init(&pool->kref);
        INIT_LIST_HEAD(&pool->list);
        INIT_WORK(&pool->shrink_work, shrink_worker);

        zswap_pool_debug("created", pool);

        return pool;

error:
        if (pool->acomp_ctx)
                free_percpu(pool->acomp_ctx);
        if (pool->zpool)
                zpool_destroy_pool(pool->zpool);
        kfree(pool);
        return NULL;
}

static __init struct zswap_pool *__zswap_pool_create_fallback(void)
{
        bool has_comp, has_zpool;

        has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
        if (!has_comp && strcmp(zswap_compressor,
                                CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
                pr_err("compressor %s not available, using default %s\n",
                       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
                param_free_charp(&zswap_compressor);
                zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
                has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
        }
        if (!has_comp) {
                pr_err("default compressor %s not available\n",
                       zswap_compressor);
                param_free_charp(&zswap_compressor);
                zswap_compressor = ZSWAP_PARAM_UNSET;
        }

        has_zpool = zpool_has_pool(zswap_zpool_type);
        if (!has_zpool && strcmp(zswap_zpool_type,
                                 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
                pr_err("zpool %s not available, using default %s\n",
                       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
                param_free_charp(&zswap_zpool_type);
                zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
                has_zpool = zpool_has_pool(zswap_zpool_type);
        }
        if (!has_zpool) {
                pr_err("default zpool %s not available\n",
                       zswap_zpool_type);
                param_free_charp(&zswap_zpool_type);
                zswap_zpool_type = ZSWAP_PARAM_UNSET;
        }

        if (!has_comp || !has_zpool)
                return NULL;

        return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
        zswap_pool_debug("destroying", pool);

        cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
        free_percpu(pool->acomp_ctx);
        zpool_destroy_pool(pool->zpool);
        kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
        if (!pool)
                return 0;

        return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct work_struct *work)
{
        struct zswap_pool *pool = container_of(work, typeof(*pool),
                                                release_work);

        synchronize_rcu();

        /* nobody should have been able to get a kref... */
        WARN_ON(kref_get_unless_zero(&pool->kref));

        /* pool is now off zswap_pools list and has no references. */
        zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
        struct zswap_pool *pool;

        pool = container_of(kref, typeof(*pool), kref);

        spin_lock(&zswap_pools_lock);

        WARN_ON(pool == zswap_pool_current());

        list_del_rcu(&pool->list);

        INIT_WORK(&pool->release_work, __zswap_pool_release);
        schedule_work(&pool->release_work);

        spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
        kref_put(&pool->kref, __zswap_pool_empty);
}

/*********************************
* param callbacks
**********************************/

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
                             char *type, char *compressor)
{
        struct zswap_pool *pool, *put_pool = NULL;
        char *s = strstrip((char *)val);
        int ret;

        if (zswap_init_failed) {
                pr_err("can't set param, initialization failed\n");
                return -ENODEV;
        }

        /* no change required */
        if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
                return 0;

        /* if this is load-time (pre-init) param setting,
         * don't create a pool; that's done during init.
         */
        if (!zswap_init_started)
                return param_set_charp(s, kp);

        if (!type) {
                if (!zpool_has_pool(s)) {
                        pr_err("zpool %s not available\n", s);
                        return -ENOENT;
                }
                type = s;
        } else if (!compressor) {
                if (!crypto_has_acomp(s, 0, 0)) {
                        pr_err("compressor %s not available\n", s);
                        return -ENOENT;
                }
                compressor = s;
        } else {
                WARN_ON(1);
                return -EINVAL;
        }

        spin_lock(&zswap_pools_lock);

        pool = zswap_pool_find_get(type, compressor);
        if (pool) {
                zswap_pool_debug("using existing", pool);
                WARN_ON(pool == zswap_pool_current());
                list_del_rcu(&pool->list);
        }

        spin_unlock(&zswap_pools_lock);

        if (!pool)
                pool = zswap_pool_create(type, compressor);

        if (pool)
                ret = param_set_charp(s, kp);
        else
                ret = -EINVAL;

        spin_lock(&zswap_pools_lock);

        if (!ret) {
                put_pool = zswap_pool_current();
                list_add_rcu(&pool->list, &zswap_pools);
                zswap_has_pool = true;
        } else if (pool) {
                /* add the possibly pre-existing pool to the end of the pools
                 * list; if it's new (and empty) then it'll be removed and
                 * destroyed by the put after we drop the lock
                 */
                list_add_tail_rcu(&pool->list, &zswap_pools);
                put_pool = pool;
        }

        spin_unlock(&zswap_pools_lock);

        if (!zswap_has_pool && !pool) {
                /* if initial pool creation failed, and this pool creation also
                 * failed, maybe both compressor and zpool params were bad.
                 * Allow changing this param, so pool creation will succeed
                 * when the other param is changed. We already verified this
                 * param is ok in the zpool_has_pool() or crypto_has_acomp()
                 * checks above.
                 */
                ret = param_set_charp(s, kp);
        }

        /* drop the ref from either the old current pool,
         * or the new pool we failed to add
         */
        if (put_pool)
                zswap_pool_put(put_pool);

        return ret;
}

static int zswap_compressor_param_set(const char *val,
                                      const struct kernel_param *kp)
{
        return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
                                 const struct kernel_param *kp)
{
        return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
                                   const struct kernel_param *kp)
{
        if (zswap_init_failed) {
                pr_err("can't enable, initialization failed\n");
                return -ENODEV;
        }
        if (!zswap_has_pool && zswap_init_started) {
                pr_err("can't enable, no pool configured\n");
                return -ENODEV;
        }

        return param_set_bool(val, kp);
}

/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
        ZSWAP_SWAPCACHE_NEW,
        ZSWAP_SWAPCACHE_EXIST,
        ZSWAP_SWAPCACHE_FAIL,
};

/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache). If the page
 * is found, it is returned in retpage. Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage.
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 * the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
                                struct page **retpage)
{
        bool page_was_allocated;

        *retpage = __read_swap_cache_async(entry, GFP_KERNEL,
                        NULL, 0, &page_was_allocated);
        if (page_was_allocated)
                return ZSWAP_SWAPCACHE_NEW;
        if (!*retpage)
                return ZSWAP_SWAPCACHE_FAIL;
        return ZSWAP_SWAPCACHE_EXIST;
}

/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place. After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
        struct zswap_header *zhdr;
        swp_entry_t swpentry;
        struct zswap_tree *tree;
        pgoff_t offset;
        struct zswap_entry *entry;
        struct page *page;
        struct scatterlist input, output;
        struct crypto_acomp_ctx *acomp_ctx;

        u8 *src, *tmp = NULL;
        unsigned int dlen;
        int ret;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
        };

        if (!zpool_can_sleep_mapped(pool)) {
                tmp = kmalloc(PAGE_SIZE, GFP_ATOMIC);
                if (!tmp)
                        return -ENOMEM;
        }

        /* extract swpentry from data */
        zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
        swpentry = zhdr->swpentry; /* here */
        tree = zswap_trees[swp_type(swpentry)];
        offset = swp_offset(swpentry);

        /* find and ref zswap entry */
        spin_lock(&tree->lock);
        entry = zswap_entry_find_get(&tree->rbroot, offset);
        if (!entry) {
                /* entry was invalidated */
                spin_unlock(&tree->lock);
                zpool_unmap_handle(pool, handle);
                kfree(tmp);
                return 0;
        }
        spin_unlock(&tree->lock);
        BUG_ON(offset != entry->offset);

        src = (u8 *)zhdr + sizeof(struct zswap_header);
        if (!zpool_can_sleep_mapped(pool)) {
                memcpy(tmp, src, entry->length);
                src = tmp;
                zpool_unmap_handle(pool, handle);
        }

        /* try to allocate swap cache page */
        switch (zswap_get_swap_cache_page(swpentry, &page)) {
        case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
                ret = -ENOMEM;
                goto fail;

        case ZSWAP_SWAPCACHE_EXIST:
                /* page is already in the swap cache, ignore for now */
                put_page(page);
                ret = -EEXIST;
                goto fail;

        case ZSWAP_SWAPCACHE_NEW: /* page is locked */
                /* decompress */
                acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
                dlen = PAGE_SIZE;

                mutex_lock(acomp_ctx->mutex);
                sg_init_one(&input, src, entry->length);
                sg_init_table(&output, 1);
                sg_set_page(&output, page, PAGE_SIZE, 0);
                acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
                ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
                dlen = acomp_ctx->req->dlen;
                mutex_unlock(acomp_ctx->mutex);

                BUG_ON(ret);
                BUG_ON(dlen != PAGE_SIZE);

                /* page is up to date */
                SetPageUptodate(page);
        }

        /* move it to the tail of the inactive list after end_writeback */
        SetPageReclaim(page);

        /* start writeback */
        __swap_writepage(page, &wbc, end_swap_bio_write);
        put_page(page);
        zswap_written_back_pages++;

        spin_lock(&tree->lock);
        /* drop local reference */
        zswap_entry_put(tree, entry);

        /*
         * There are two possible situations for entry here:
         * (1) refcount is 1 (normal case), entry is valid and on the tree
         * (2) refcount is 0, entry is freed and not on the tree
         *     because invalidate happened during writeback
         * search the tree and free the entry if it is found
         */
        if (entry == zswap_rb_search(&tree->rbroot, offset))
                zswap_entry_put(tree, entry);
        spin_unlock(&tree->lock);

        goto end;

        /*
         * If we get here due to ZSWAP_SWAPCACHE_EXIST, a load may be
         * happening concurrently; it is safe and okay to not free the
         * entry. If we free the entry in the following put, it is also
         * okay to return !0.
         */
fail:
        spin_lock(&tree->lock);
        zswap_entry_put(tree, entry);
        spin_unlock(&tree->lock);

end:
        if (zpool_can_sleep_mapped(pool))
                zpool_unmap_handle(pool, handle);
        else
                kfree(tmp);

        return ret;
}

static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;
        for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos] != page[0])
                        return 0;
        }
        *value = page[0];
        return 1;
}

static void zswap_fill_page(void *ptr, unsigned long value)
{
        unsigned long *page;

        page = (unsigned long *)ptr;
        memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}
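
/*
 * Example of the same-filled path: a page of all zeroes (the common
 * case) is detected by zswap_is_page_same_filled() with value == 0,
 * stored as a zero-length entry with no zpool allocation at all, and
 * reconstructed on load by the memset_l() in zswap_fill_page().
 */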

/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
                                struct page *page)
{
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry, *dupentry;
        struct scatterlist input, output;
        struct crypto_acomp_ctx *acomp_ctx;
        int ret;
        unsigned int hlen, dlen = PAGE_SIZE;
        unsigned long handle, value;
        char *buf;
        u8 *src, *dst;
        struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
        gfp_t gfp;

        /* THP isn't supported */
        if (PageTransHuge(page)) {
                ret = -EINVAL;
                goto reject;
        }

        if (!zswap_enabled || !tree) {
                ret = -ENODEV;
                goto reject;
        }

        /* reclaim space if needed */
        if (zswap_is_full()) {
                struct zswap_pool *pool;

                zswap_pool_limit_hit++;
                zswap_pool_reached_full = true;
                pool = zswap_pool_last_get();
                if (pool)
                        queue_work(shrink_wq, &pool->shrink_work);
                ret = -ENOMEM;
                goto reject;
        }

        if (zswap_pool_reached_full) {
                if (!zswap_can_accept()) {
                        ret = -ENOMEM;
                        goto reject;
                } else
                        zswap_pool_reached_full = false;
        }

        /* allocate entry */
        entry = zswap_entry_cache_alloc(GFP_KERNEL);
        if (!entry) {
                zswap_reject_kmemcache_fail++;
                ret = -ENOMEM;
                goto reject;
        }

        if (zswap_same_filled_pages_enabled) {
                src = kmap_atomic(page);
                if (zswap_is_page_same_filled(src, &value)) {
                        kunmap_atomic(src);
                        entry->offset = offset;
                        entry->length = 0;
                        entry->value = value;
                        atomic_inc(&zswap_same_filled_pages);
                        goto insert_entry;
                }
                kunmap_atomic(src);
        }

        if (!zswap_non_same_filled_pages_enabled) {
                ret = -EINVAL;
                goto freepage;
        }

        /* if entry is successfully added, it keeps the reference */
        entry->pool = zswap_pool_current_get();
        if (!entry->pool) {
                ret = -EINVAL;
                goto freepage;
        }

        /* compress */
        acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);

        mutex_lock(acomp_ctx->mutex);

        dst = acomp_ctx->dstmem;
        sg_init_table(&input, 1);
        sg_set_page(&input, page, PAGE_SIZE, 0);

        /* zswap_dstmem is of size (PAGE_SIZE * 2). Reflect same in sg_list */
        sg_init_one(&output, dst, PAGE_SIZE * 2);
        acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
        /*
         * It may look a little odd that we send an asynchronous request and
         * then wait for its completion synchronously; the process is in
         * fact synchronous.
         * In theory, acomp lets users submit multiple requests to one acomp
         * instance and have them completed concurrently. But frontswap
         * stores and loads page by page, so there is no way for one thread
         * doing frontswap to send a second page before the first is done.
         * Different threads running on different CPUs use different acomp
         * instances, though, so multiple threads can still (de)compress in
         * parallel.
         */
        ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
        dlen = acomp_ctx->req->dlen;

        if (ret) {
                ret = -EINVAL;
                goto put_dstmem;
        }

        /* store */
        hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
        gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
        if (zpool_malloc_support_movable(entry->pool->zpool))
                gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
        ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle);
        if (ret == -ENOSPC) {
                zswap_reject_compress_poor++;
                goto put_dstmem;
        }
        if (ret) {
                zswap_reject_alloc_fail++;
                goto put_dstmem;
        }
        buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_WO);
        memcpy(buf, &zhdr, hlen);
        memcpy(buf + hlen, dst, dlen);
        zpool_unmap_handle(entry->pool->zpool, handle);
        mutex_unlock(acomp_ctx->mutex);

        /* populate entry */
        entry->offset = offset;
        entry->handle = handle;
        entry->length = dlen;

insert_entry:
        /* map */
        spin_lock(&tree->lock);
        do {
                ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
                if (ret == -EEXIST) {
                        zswap_duplicate_entry++;
                        /* remove from rbtree */
                        zswap_rb_erase(&tree->rbroot, dupentry);
                        zswap_entry_put(tree, dupentry);
                }
        } while (ret == -EEXIST);
        spin_unlock(&tree->lock);

        /* update stats */
        atomic_inc(&zswap_stored_pages);
        zswap_update_total_size();

        return 0;

put_dstmem:
        mutex_unlock(acomp_ctx->mutex);
        zswap_pool_put(entry->pool);
freepage:
        zswap_entry_cache_free(entry);
reject:
        return ret;
}

/*
 * Returns 0 if the page was successfully decompressed,
 * or -1 on entry not found or error.
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
                                struct page *page)
{
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry;
        struct scatterlist input, output;
        struct crypto_acomp_ctx *acomp_ctx;
        u8 *src, *dst, *tmp;
        unsigned int dlen;
        int ret;

        /* find */
        spin_lock(&tree->lock);
        entry = zswap_entry_find_get(&tree->rbroot, offset);
        if (!entry) {
                /* entry was written back */
                spin_unlock(&tree->lock);
                return -1;
        }
        spin_unlock(&tree->lock);

        if (!entry->length) {
                dst = kmap_atomic(page);
                zswap_fill_page(dst, entry->value);
                kunmap_atomic(dst);
                ret = 0;
                goto freeentry;
        }

        if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
                tmp = kmalloc(entry->length, GFP_ATOMIC);
                if (!tmp) {
                        ret = -ENOMEM;
                        goto freeentry;
                }
        }

        /* decompress */
        dlen = PAGE_SIZE;
        src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
        if (zpool_evictable(entry->pool->zpool))
                src += sizeof(struct zswap_header);

        if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
                memcpy(tmp, src, entry->length);
                src = tmp;
                zpool_unmap_handle(entry->pool->zpool, entry->handle);
        }

        acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
        mutex_lock(acomp_ctx->mutex);
        sg_init_one(&input, src, entry->length);
        sg_init_table(&output, 1);
        sg_set_page(&output, page, PAGE_SIZE, 0);
        acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
        ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
        mutex_unlock(acomp_ctx->mutex);

        if (zpool_can_sleep_mapped(entry->pool->zpool))
                zpool_unmap_handle(entry->pool->zpool, entry->handle);
        else
                kfree(tmp);

        BUG_ON(ret);

freeentry:
        spin_lock(&tree->lock);
        zswap_entry_put(tree, entry);
        spin_unlock(&tree->lock);

        return ret;
}

/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry;

        /* find */
        spin_lock(&tree->lock);
        entry = zswap_rb_search(&tree->rbroot, offset);
        if (!entry) {
                /* entry was written back */
                spin_unlock(&tree->lock);
                return;
        }

        /* remove from rbtree */
        zswap_rb_erase(&tree->rbroot, entry);

        /* drop the initial reference from entry creation */
        zswap_entry_put(tree, entry);

        spin_unlock(&tree->lock);
}

/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry, *n;

        if (!tree)
                return;

        /* walk the tree and free everything */
        spin_lock(&tree->lock);
        rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
                zswap_free_entry(entry);
        tree->rbroot = RB_ROOT;
        spin_unlock(&tree->lock);
        kfree(tree);
        zswap_trees[type] = NULL;
}

static void zswap_frontswap_init(unsigned type)
{
        struct zswap_tree *tree;

        tree = kzalloc(sizeof(*tree), GFP_KERNEL);
        if (!tree) {
                pr_err("alloc failed, zswap disabled for swap type %d\n", type);
                return;
        }

        tree->rbroot = RB_ROOT;
        spin_lock_init(&tree->lock);
        zswap_trees[type] = tree;
}

static const struct frontswap_ops zswap_frontswap_ops = {
        .store = zswap_frontswap_store,
        .load = zswap_frontswap_load,
        .invalidate_page = zswap_frontswap_invalidate_page,
        .invalidate_area = zswap_frontswap_invalidate_area,
        .init = zswap_frontswap_init
};
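
/*
 * These callbacks are driven by the frontswap core from the swap path:
 * .store when a page is swapped out, .load when it is swapped back in,
 * .invalidate_page when a swap slot is freed, and .invalidate_area when
 * a whole swap area goes away (e.g. at swapoff).
 */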

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
        if (!debugfs_initialized())
                return -ENODEV;

        zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

        debugfs_create_u64("pool_limit_hit", 0444,
                           zswap_debugfs_root, &zswap_pool_limit_hit);
        debugfs_create_u64("reject_reclaim_fail", 0444,
                           zswap_debugfs_root, &zswap_reject_reclaim_fail);
        debugfs_create_u64("reject_alloc_fail", 0444,
                           zswap_debugfs_root, &zswap_reject_alloc_fail);
        debugfs_create_u64("reject_kmemcache_fail", 0444,
                           zswap_debugfs_root, &zswap_reject_kmemcache_fail);
        debugfs_create_u64("reject_compress_poor", 0444,
                           zswap_debugfs_root, &zswap_reject_compress_poor);
        debugfs_create_u64("written_back_pages", 0444,
                           zswap_debugfs_root, &zswap_written_back_pages);
        debugfs_create_u64("duplicate_entry", 0444,
                           zswap_debugfs_root, &zswap_duplicate_entry);
        debugfs_create_u64("pool_total_size", 0444,
                           zswap_debugfs_root, &zswap_pool_total_size);
        debugfs_create_atomic_t("stored_pages", 0444,
                                zswap_debugfs_root, &zswap_stored_pages);
        debugfs_create_atomic_t("same_filled_pages", 0444,
                                zswap_debugfs_root, &zswap_same_filled_pages);

        return 0;
}
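
/*
 * With debugfs mounted in the usual place, the counters created above
 * appear under /sys/kernel/debug/zswap/, e.g.:
 *
 *      cat /sys/kernel/debug/zswap/pool_total_size
 */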
#else
static int __init zswap_debugfs_init(void)
{
        return 0;
}
#endif

/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
        struct zswap_pool *pool;
        int ret;

        zswap_init_started = true;

        if (zswap_entry_cache_create()) {
                pr_err("entry cache creation failed\n");
                goto cache_fail;
        }

        ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
                                zswap_dstmem_prepare, zswap_dstmem_dead);
        if (ret) {
                pr_err("dstmem alloc failed\n");
                goto dstmem_fail;
        }

        ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
                                      "mm/zswap_pool:prepare",
                                      zswap_cpu_comp_prepare,
                                      zswap_cpu_comp_dead);
        if (ret)
                goto hp_fail;

        pool = __zswap_pool_create_fallback();
        if (pool) {
                pr_info("loaded using pool %s/%s\n", pool->tfm_name,
                        zpool_get_type(pool->zpool));
                list_add(&pool->list, &zswap_pools);
                zswap_has_pool = true;
        } else {
                pr_err("pool creation failed\n");
                zswap_enabled = false;
        }

        shrink_wq = create_workqueue("zswap-shrink");
        if (!shrink_wq)
                goto fallback_fail;

        ret = frontswap_register_ops(&zswap_frontswap_ops);
        if (ret)
                goto destroy_wq;
        if (zswap_debugfs_init())
                pr_warn("debugfs initialization failed\n");
        return 0;

destroy_wq:
        destroy_workqueue(shrink_wq);
fallback_fail:
        if (pool)
                zswap_pool_destroy(pool);
hp_fail:
        cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
dstmem_fail:
        zswap_entry_cache_destroy();
cache_fail:
        /* if built-in, we aren't unloaded on failure; don't allow use */
        zswap_init_failed = true;
        zswap_enabled = false;
        return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");