/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5

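/*
 * Illustrative sketch (not part of this header): with the 5/27 split the
 * comment above describes for a 32-bit pgoff_t, a swap entry packs roughly
 * as below.  The real accessors are swp_entry()/swp_type()/swp_offset() in
 * <linux/swapops.h>; the exact bit layout is architecture-specific, this
 * only shows the arithmetic of a 5/27 encoding:
 *
 *	unsigned long raw    = (type << 27) | offset;     (offset < 1 << 27)
 *	unsigned int  type   = raw >> 27;                 (up to 32 types)
 *	unsigned long offset = raw & ((1UL << 27) - 1);   (up to 128M pages)
 */
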
/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/vm/hmm.rst. In short, we need struct pages for device
 * memory that is unaddressable (inaccessible) by the CPU, so that we can
 * migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU page
 * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)

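/*
 * Worked example (illustrative): MAX_SWAPFILES_SHIFT == 5 gives
 * 1 << 5 == 32 encodable types.  With CONFIG_DEVICE_PRIVATE,
 * CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE all enabled, that leaves
 * 32 - 4 - 2 - 1 == 25 slots for real swap devices; with all three
 * disabled, all 32 types are usable as swap files.
 */
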
/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};

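/*
 * Illustrative sketch (not kernel code): because the magic occupies the
 * last 10 bytes of the first PAGE_SIZE bytes, a userspace tool that has
 * read the first page of a device into "buf" could detect a version-2
 * swap area roughly like this:
 *
 *	union swap_header *hdr = (union swap_header *)buf;
 *
 *	if (!memcmp(hdr->magic.magic, "SWAPSPACE2", 10))
 *		printf("v2 swap area, last_page=%u\n", hdr->info.last_page);
 */
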
/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks. A list of swap extents maps the entire swapfile. (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file. Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format.
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))

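/*
 * Worked example (illustrative, assuming 4KiB pages): magic.magic starts
 * at PAGE_SIZE - 10 == 4086, and info.badpages starts at
 * 1024 + 3*4 + 16 + 16 + 117*4 == 1536, so
 * MAX_SWAP_BADPAGES == (4086 - 1536) / 4 == 637 entries.
 */
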
enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* no overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
					/* add others here before... */
	SWP_SCANNING	= (1 << 14),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special value in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All free
 * clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * and the swap_info_struct->swap_map
				 * elements corresponding to this swap
				 * cluster
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */

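/*
 * Illustrative sketch (not an exported helper): how the data/flags
 * bitfields above are interpreted, with ci->lock held:
 *
 *	if (ci->flags & CLUSTER_FLAG_FREE)
 *		next = ci->data;	(index of the next free cluster)
 *	else
 *		in_use = ci->data;	(pages allocated in this cluster)
 */
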
/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entry from
 * its own cluster and swapout sequentially. The purpose is to optimize swapout
 * throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	struct percpu_ref users;	/* indicate and keep swap device valid. */
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
	struct completion comp;		/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. Other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. Changing flags needs
					 * to hold both this lock and
					 * swap_lock; when both must be held,
					 * take swap_lock first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[]; /*
					  * entries in swap_avail_heads, one
					  * entry per node.
					  * Must be last: the array length is
					  * nr_node_ids, which is not a fixed
					  * value, so it has to be allocated
					  * dynamically.
					  * And it has to be an array so that
					  * plist_for_each_* can work.
					  */
};

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING	5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING	3
#define SWAP_RA_PTE_CACHE_SIZE	(1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
#ifdef CONFIG_64BIT
	pte_t *ptes;
#else
	pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

static inline swp_entry_t folio_swap_entry(struct folio *folio)
{
	swp_entry_t entry = { .val = page_private(&folio->page) };
	return entry;
}

/* linux/mm/workingset.c */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
#define mapping_set_update(xas, mapping) do {				\
	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {		\
		xas_set_update(xas, workingset_update_node);		\
		xas_set_lru(xas, &shadow_nodes);			\
	}								\
} while (0)

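/*
 * Illustrative usage (the pattern used in mm/filemap.c): callers set up an
 * XArray state for the mapping's page cache and register the workingset
 * hooks before storing entries:
 *
 *	XA_STATE(xas, &mapping->i_pages, index);
 *
 *	mapping_set_update(&xas, mapping);
 */
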
/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
extern void lru_note_cost(struct lruvec *lruvec, bool file,
			  unsigned int nr_pages);
extern void lru_note_cost_folio(struct folio *);
extern void folio_add_lru(struct folio *);
extern void lru_cache_add(struct page *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void lru_cache_add_inactive_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

static inline bool node_reclaim_enabled(void)
{
	/* Is any node_reclaim_mode bit set? */
	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

extern void check_move_unevictable_pages(struct pagevec *pvec);

extern void kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	bio_end_io_t end_write_func);
bool swap_dirty_folio(struct address_space *mapping, struct folio *folio);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])
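/*
 * Worked example (illustrative, assuming 4KiB pages): each address space
 * covers 1 << 14 == 16384 swap pages, i.e. 16384 * 4KiB == 64MiB of swap,
 * so an entry's address space is selected by its offset divided by 16384.
 */
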
static inline unsigned long total_swapcache_pages(void)
{
	return global_node_page_state(NR_SWAPCACHE);
}

extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern void *get_shadow_from_swap_cache(swp_entry_t entry);
extern int add_to_swap_cache(struct page *page, swp_entry_t entry,
			gfp_t gfp, void **shadowp);
extern void __delete_from_swap_cache(struct page *page,
			swp_entry_t entry, void *shadow);
extern void delete_from_swap_cache(struct page *);
extern void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end);
extern void free_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
				      struct vm_area_struct *vma,
				      unsigned long addr);
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated);
extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
				struct vm_fault *vmf);
extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
				struct vm_fault *vmf);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_page_sector(struct page *page);

static inline void put_swap_device(struct swap_info_struct *si)
{
	percpu_ref_put(&si->users);
}

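/*
 * Illustrative usage (not a new API): get_swap_device() pins the swap
 * device against concurrent swapoff via the users percpu_ref, so every
 * successful get must be paired with put_swap_device():
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *
 *	if (si) {
 *		... the device won't be swapped off here ...
 *		put_swap_device(si);
 *	}
 */
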
#else /* CONFIG_SWAP */

static inline int swap_readpage(struct page *page, bool do_poll)
{
	return 0;
}

static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

static inline struct address_space *swap_address_space(swp_entry_t entry)
{
	return NULL;
}

#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/*
 * Only sparc cannot include linux/pagemap.h in this file,
 * so leave put_page and release_pages undeclared...
 */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr))

static inline void free_swap_cache(struct page *page)
{
}

static inline void show_swap_cache_info(void)
{
}

/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
#define free_swap_and_cache(e) is_pfn_swap_entry(e)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swap_cluster_readahead(swp_entry_t entry,
			gfp_t gfp_mask, struct vm_fault *vmf)
{
	return NULL;
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_fault *vmf)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
					     struct vm_area_struct *vma,
					     unsigned long addr)
{
	return NULL;
}

static inline
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	return find_get_page(mapping, index);
}

static inline int add_to_swap(struct page *page)
{
	return 0;
}

static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	return NULL;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
					gfp_t gfp_mask, void **shadowp)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page,
					swp_entry_t entry, void *shadow)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
					unsigned long end)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root ? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return vm_swappiness;

	return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return;
	__cgroup_throttle_swaprate(page, gfp_mask);
}
#else
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
}
#endif

#ifdef CONFIG_MEMCG_SWAP
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
extern int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(page, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */