/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

#ifdef CONFIG_SWAP
#include <linux/swapfile.h>
#endif	/* CONFIG_SWAP */

/*
 * swapcache pages are stored in the swapper_space radix tree. We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the six
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits. Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further one bit: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)
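/*
 * Worked example (illustrative only, assuming a 64-bit build where
 * BITS_PER_XA_VALUE == 63 and MAX_SWAPFILES_SHIFT == 5):
 *
 *	SWP_TYPE_SHIFT  == 63 - 5 == 58
 *	SWP_OFFSET_MASK == (1UL << 58) - 1
 *
 * so swp_entry(2, 0x1234) below packs the value (2UL << 58) | 0x1234,
 * swp_type() recovers 2 from the high bits and swp_offset() recovers
 * 0x1234 from the low 58 bits.
 */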

/*
 * Definitions only for PFN swap entries (see is_pfn_swap_entry()). To
 * store PFN, we only need SWP_PFN_BITS bits. Each of the pfn swap entries
 * can use the extra bits to store other information besides PFN.
 */
#ifdef MAX_PHYSMEM_BITS
#define SWP_PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#else  /* MAX_PHYSMEM_BITS */
#define SWP_PFN_BITS		(BITS_PER_LONG - PAGE_SHIFT)
#endif	/* MAX_PHYSMEM_BITS */
#define SWP_PFN_MASK		(BIT(SWP_PFN_BITS) - 1)
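/*
 * Illustrative numbers (not authoritative; the real values are arch and
 * config dependent): on x86_64 with 4-level page tables, MAX_PHYSMEM_BITS
 * is 46 and PAGE_SHIFT is 12, so SWP_PFN_BITS == 34 and SWP_PFN_MASK
 * covers the low 34 bits of the swp offset.
 */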
41
2e346877
PX
42/**
43 * Migration swap entry specific bitfield definitions. Layout:
44 *
45 * |----------+--------------------|
46 * | swp_type | swp_offset |
47 * |----------+--------+-+-+-------|
48 * | | resv |D|A| PFN |
49 * |----------+--------+-+-+-------|
50 *
51 * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
52 * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
53 *
54 * Note: A/D bits will be stored in migration entries iff there're enough
55 * free bits in arch specific swp offset. By default we'll ignore A/D bits
56 * when migrating a page. Please refer to migration_entry_supports_ad()
57 * for more information. If there're more bits besides PFN and A/D bits,
58 * they should be reserved and always be zeros.
59 */
60#define SWP_MIG_YOUNG_BIT (SWP_PFN_BITS)
61#define SWP_MIG_DIRTY_BIT (SWP_PFN_BITS + 1)
62#define SWP_MIG_TOTAL_BITS (SWP_PFN_BITS + 2)
63
64#define SWP_MIG_YOUNG BIT(SWP_MIG_YOUNG_BIT)
65#define SWP_MIG_DIRTY BIT(SWP_MIG_DIRTY_BIT)
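/*
 * Example (illustrative only): continuing the x86_64 numbers in the
 * example above, with SWP_PFN_BITS == 34 the young (A) bit would live at
 * offset bit 34, the dirty (D) bit at bit 35, and SWP_MIG_TOTAL_BITS == 36.
 * The helpers below (make_migration_entry_young()/_dirty()) only set these
 * bits when migration_entry_supports_ad() reports that the arch swp offset
 * is wide enough; otherwise the entry is returned unchanged.
 */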

static inline bool is_pfn_swap_entry(swp_entry_t entry);

/* Clear all flags but only keep swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
	if (pte_swp_exclusive(pte))
		pte = pte_swp_clear_exclusive(pte);
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	if (pte_swp_uffd_wp(pte))
		pte = pte_swp_clear_uffd_wp(pte);
	return pte;
}

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}

/*
 * This should only be called upon a pfn swap entry to get the PFN stored
 * in the swap entry. Please refer to is_pfn_swap_entry() for the
 * definition of a pfn swap entry.
 */
static inline unsigned long swp_offset_pfn(swp_entry_t entry)
{
	VM_BUG_ON(!is_pfn_swap_entry(entry));
	return swp_offset(entry) & SWP_PFN_MASK;
}

/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	pte = pte_swp_clear_flags(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}
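/*
 * Illustrative round trip (not a real call site): a swap pte read from a
 * page table can be decoded and re-encoded roughly like this, where `pte'
 * is a hypothetical swap pte used only for this example:
 *
 *	swp_entry_t entry = pte_to_swp_entry(pte);
 *	unsigned type     = swp_type(entry);
 *	pgoff_t offset    = swp_offset(entry);
 *	pte_t again       = swp_entry_to_pte(swp_entry(type, offset));
 *
 * `again' matches `pte' except for the per-pte swap flag bits (soft-dirty,
 * uffd-wp, exclusive), which pte_swp_clear_flags() strips while decoding.
 */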

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}

static inline swp_entry_t make_swapin_error_entry(struct page *page)
{
	return swp_entry(SWP_SWAPIN_ERROR, page_to_pfn(page));
}

static inline int is_swapin_error_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_SWAPIN_ERROR;
}

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_READ, offset);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_WRITE, offset);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset);
}

static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ ||
	       swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE;
}

static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE);
}
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}
#endif /* CONFIG_DEVICE_PRIVATE */

#ifdef CONFIG_MIGRATION
static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
}

static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
}

static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ, offset);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_WRITE, offset);
}

/*
 * Returns whether the host has large enough swap offset field to support
 * carrying over pgtable A/D bits for page migrations. The result is
 * pretty much arch specific.
 */
static inline bool migration_entry_supports_ad(void)
{
	/*
	 * max_swapfile_size() returns the max supported swp-offset plus 1.
	 * We can support the migration A/D bits iff the pfn swap entry has
	 * the offset large enough to cover all of them (PFN, A & D bits).
	 */
#ifdef CONFIG_SWAP
	return swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS);
#else  /* CONFIG_SWAP */
	return false;
#endif	/* CONFIG_SWAP */
}
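/*
 * Sketch of intended use (illustrative only, not a real call site): when a
 * present pte is replaced by a migration entry, the caller can preserve the
 * hardware referenced/dirty state roughly like this, where `old_pte' and
 * `page' are hypothetical locals in the caller:
 *
 *	swp_entry_t entry = make_writable_migration_entry(page_to_pfn(page));
 *
 *	if (pte_young(old_pte))
 *		entry = make_migration_entry_young(entry);
 *	if (pte_dirty(old_pte))
 *		entry = make_migration_entry_dirty(entry);
 *
 * On architectures without enough offset bits this degrades gracefully: the
 * make_*() helpers return the entry unchanged and the A/D state is simply
 * not carried across the migration.
 */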

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_YOUNG);
	return entry;
}

static inline bool is_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_offset(entry) & SWP_MIG_YOUNG;
	/* Keep the old behavior of aging page after migration */
	return false;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_DIRTY);
	return entry;
}

static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_offset(entry) & SWP_MIG_DIRTY;
	/* Keep the old behavior of clean page after migration */
	return false;
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				   spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				 unsigned long address);
#ifdef CONFIG_HUGETLB_PAGE
extern void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl);
extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
#endif	/* CONFIG_HUGETLB_PAGE */
#else  /* CONFIG_MIGRATION */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					  spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
#ifdef CONFIG_HUGETLB_PAGE
static inline void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { }
#endif	/* CONFIG_HUGETLB_PAGE */
static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return 0;
}
static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	return entry;
}

static inline bool is_migration_entry_young(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	return entry;
}

static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	return false;
}
#endif	/* CONFIG_MIGRATION */

typedef unsigned long pte_marker;

#define  PTE_MARKER_UFFD_WP		BIT(0)
#define  PTE_MARKER_MASK		(PTE_MARKER_UFFD_WP)

#ifdef CONFIG_PTE_MARKER

static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	return swp_entry(SWP_PTE_MARKER, marker);
}

static inline bool is_pte_marker_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_PTE_MARKER;
}

static inline pte_marker pte_marker_get(swp_entry_t entry)
{
	return swp_offset(entry) & PTE_MARKER_MASK;
}

static inline bool is_pte_marker(pte_t pte)
{
	return is_swap_pte(pte) && is_pte_marker_entry(pte_to_swp_entry(pte));
}

#else /* CONFIG_PTE_MARKER */

static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	/* This should never be called if !CONFIG_PTE_MARKER */
	WARN_ON_ONCE(1);
	return swp_entry(0, 0);
}

static inline bool is_pte_marker_entry(swp_entry_t entry)
{
	return false;
}

static inline pte_marker pte_marker_get(swp_entry_t entry)
{
	return 0;
}

static inline bool is_pte_marker(pte_t pte)
{
	return false;
}

#endif /* CONFIG_PTE_MARKER */

static inline pte_t make_pte_marker(pte_marker marker)
{
	return swp_entry_to_pte(make_pte_marker_entry(marker));
}

/*
 * This is a special version to check pte_none() just to cover the case when
 * the pte is a pte marker. It existed because in many cases the pte marker
 * should be seen as a none pte; it's just that we have stored some information
 * onto the none pte so it becomes not-none any more.
 *
 * It should be used when the pte is file-backed, ram-based and backing
 * userspace pages, like shmem. It is not needed upon pgtables that do not
 * support pte markers at all. For example, it's not needed on anonymous
 * memory, kernel-only memory (including when the system is during-boot),
 * non-ram based generic file-system. It's fine to be used even there, but the
 * extra pte marker check will be pure overhead.
 *
 * For systems configured with !CONFIG_PTE_MARKER this will be automatically
 * optimized to pte_none().
 */
static inline int pte_none_mostly(pte_t pte)
{
	return pte_none(pte) || is_pte_marker(pte);
}
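/*
 * Example (illustrative only, assuming a typical configuration with
 * CONFIG_PTE_MARKER enabled): a userfaultfd write-protect marker installed
 * on a shmem page cache hole stores information about an absent page:
 *
 *	pte_t pte = make_pte_marker(PTE_MARKER_UFFD_WP);
 *
 *	pte_none(pte)		== false  (something is stored in the pte)
 *	pte_none_mostly(pte)	== true   (but there is still no page behind it)
 */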

static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset_pfn(entry));

	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(is_migration_entry(entry) && !PageLocked(p));

	return p;
}

/*
 * A pfn swap entry is a special type of swap entry that always has a pfn stored
 * in the swap offset. They are used to represent unaddressable device memory
 * and to restrict access to a page undergoing migration.
 */
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{
	/* Make sure the swp offset can always store the needed fields */
	BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);

	return is_migration_entry(entry) || is_device_private_entry(entry) ||
	       is_device_exclusive_entry(entry);
}

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	if (pmd_swp_uffd_wp(pmd))
		pmd = pmd_swp_clear_uffd_wp(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else  /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif	/* CONFIG_ARCH_ENABLE_THP_MIGRATION */

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_sub(long i)
{
	atomic_long_sub(i, &num_poisoned_pages);
}

#else  /* CONFIG_MEMORY_FAILURE */

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}

static inline void num_poisoned_pages_sub(long i)
{
}
#endif	/* CONFIG_MEMORY_FAILURE */

static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}

#endif	/* CONFIG_MMU */
#endif	/* _LINUX_SWAPOPS_H */