/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to. The swap type and the offset into that swap type are
 * encoded into ptes and into pgoff_t's in the swapcache. Using five bits
 * for the type leaves 27 bits for the offset, so the maximum number of
 * swapcache pages is 2^27 on 32-bit-pgoff_t architectures. That assumes
 * the architecture packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5

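/*
 * Worked example (illustrative, not from the source): with a 32-bit
 * pgoff_t and a 5/27 type/offset split, there are at most 1 << 5 = 32
 * swap types and 1 << 27 swapcache page offsets per type, i.e. up to
 * 2^27 * 2^12 bytes = 512GiB of swap per type with 4KiB pages.
 */
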
/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * PTE markers are used to persist information onto PTEs that otherwise
 * would be none ptes. As the name "PTE" hints, they should only be
 * applied to the leaves of pgtables.
 */
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER		(MAX_SWAPFILES + SWP_HWPOISON_NUM + \
				 SWP_MIGRATION_NUM + SWP_DEVICE_NUM)

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/mm/hmm.rst. In short: we need struct pages for device
 * memory that is unaddressable (inaccessible) by the CPU, so that we can
 * migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table
 * entry to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU
 * page table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive to
 * a single process. For SWP_MIGRATION_WRITE, that information is implicit:
 * (part of) an anonymous page that is mapped writable is exclusive to a
 * single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
	SWP_PTE_MARKER_NUM)

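/*
 * Worked example (illustrative, not from the source): with
 * CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE all
 * enabled, MAX_SWAPFILES = 32 - 4 - 3 - 1 - 1 = 23 real swap types;
 * the top nine type values are reserved for the special entries above.
 */
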
/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};

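/*
 * Detection sketch (illustrative, not part of this header): given one
 * PAGE_SIZE buffer read from the start of a candidate swap area, the
 * signature sits in the last ten bytes.
 *
 *	union swap_header *hdr = buf;
 *
 *	if (!memcmp(hdr->magic.magic, "SWAPSPACE2", 10))
 *		pr_info("v1 swap area, last_page=%u\n", hdr->info.last_page);
 *	else if (!memcmp(hdr->magic.magic, "SWAP-SPACE", 10))
 *		pr_info("old-style swap area\n");
 */
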
/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	/* pages reclaimed outside of LRU-based reclaim */
	unsigned long reclaimed;
#ifdef CONFIG_LRU_GEN
	/* per-thread mm walk data */
	struct lru_gen_mm_walk *mm_walk;
#endif
};

/*
 * mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based
 * reclaim
 * @pages: number of pages reclaimed
 *
 * If the current process is undergoing a reclaim operation, increment the
 * number of reclaimed pages by @pages.
 */
static inline void mm_account_reclaimed_pages(unsigned long pages)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed += pages;
}

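/*
 * Usage sketch (illustrative): a slab shrinker that frees whole pages
 * outside the LRU can report them so vmscan's accounting stays accurate.
 * my_shrinker_scan() and free_my_cached_pages() are hypothetical, not
 * part of this header.
 *
 *	static unsigned long my_shrinker_scan(struct shrinker *s,
 *					      struct shrink_control *sc)
 *	{
 *		unsigned long freed = free_my_cached_pages(sc->nr_to_scan);
 *
 *		mm_account_reclaimed_pages(freed);
 *		return freed;
 *	}
 */
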
#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks. An rbtree of swap extents maps the entire swapfile (where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file). Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

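/*
 * Mapping sketch (illustrative): a swap page offset that falls inside an
 * extent translates linearly onto disk; extent_block() is a hypothetical
 * helper, not part of this header.
 *
 *	static sector_t extent_block(struct swap_extent *se, pgoff_t offset)
 *	{
 *		// caller ensures start_page <= offset < start_page + nr_pages
 *		return se->start_block + (offset - se->start_page);
 *	}
 */
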
/*
 * Max bad pages in the new format..
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))

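/*
 * Worked example (illustrative, assuming 4KiB pages): info.badpages
 * starts at byte 1024 + 3*4 + 16 + 16 + 117*4 = 1536 and magic.magic at
 * PAGE_SIZE - 10 = 4086, so MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637.
 */
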
enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* must not overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
					/* add others here before... */
	SWP_SCANNING	= (1 << 14),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flags in swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special values in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */

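/*
 * Decoding sketch (illustrative): a first-level swap_map byte packs an
 * in-place usage count in the low six bits plus the flags above.
 *
 *	unsigned char ent = si->swap_map[offset];
 *
 *	bool cached = ent & SWAP_HAS_CACHE;	// slot is in swap cache
 *	bool cont = ent & COUNT_CONTINUED;	// count continues elsewhere
 *	int count = ent & ~(SWAP_HAS_CACHE | COUNT_CONTINUED);
 *	// count exceeds SWAP_MAP_MAX only for the special values above
 */
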
/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long and naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * and swap_info_struct->swap_map
				 * elements corresponding to this
				 * swap cluster
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially. The purpose is to optimize
 * swapout throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	struct percpu_ref users;	/* indicate and keep swap device valid. */
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct file *bdev_file;		/* open handle of the bdev */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
	struct completion comp;		/* seldom referenced */
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. changing flags requires
					 * holding this lock and swap_lock. If
					 * both locks must be held, hold
					 * swap_lock first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[]; /*
					  * entries in swap_avail_heads, one
					  * entry per node.
					  * Must be last as the size of the
					  * array is nr_node_ids, which is not
					  * a fixed value, so it has to be
					  * allocated dynamically.
					  * And it has to be an array so that
					  * plist_for_each_* can work.
					  */
};

static inline swp_entry_t page_swap_entry(struct page *page)
{
	struct folio *folio = page_folio(page);
	swp_entry_t entry = folio->swap;

	entry.val += folio_page_idx(folio, page);
	return entry;
}

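/*
 * Example (illustrative): for a page that is the third subpage of a
 * swapped large folio, folio_page_idx() returns 2, so the returned entry
 * is folio->swap advanced by two swap slots in the same swap type.
 */
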
/* linux/mm/workingset.c */
bool workingset_test_recent(void *shadow, bool file, bool *workingset);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
#define mapping_set_update(xas, mapping) do {				\
	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {		\
		xas_set_update(xas, workingset_update_node);		\
		xas_set_lru(xas, &shadow_nodes);			\
	}								\
} while (0)

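/*
 * Usage sketch (illustrative): page-cache xarray updates pair an XA_STATE
 * with this macro so shadow-node tracking hooks in automatically.
 *
 *	XA_STATE(xas, &mapping->i_pages, index);
 *
 *	mapping_set_update(&xas, mapping);
 *	xas_store(&xas, folio);
 */
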
/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
void lru_note_cost(struct lruvec *lruvec, bool file,
		   unsigned int nr_io, unsigned int nr_rotated);
void lru_note_cost_refault(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
				       gfp_t gfp_mask, nodemask_t *mask);

#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  unsigned int reclaim_options);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
					    gfp_t gfp_mask, bool noswap,
					    pg_data_t *pgdat,
					    unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

static inline bool node_reclaim_enabled(void)
{
	/* Is any node_reclaim_mode bit set? */
	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

void check_move_unevictable_folios(struct folio_batch *fbatch);

extern void __meminit kswapd_run(int nid);
extern void __meminit kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
	return global_node_page_state(NR_SWAPCACHE);
}

extern void free_swap_cache(struct page *page);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct encoded_page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Is swap more than 50% full? If so, release swapcache more aggressively. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

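/*
 * Worked example (illustrative): nr_swap_pages counts free swap slots,
 * so with total_swap_pages = 1000 and 400 slots free, 400 * 2 = 800 <
 * 1000 and vm_swap_full() returns true: more than half of swap is used.
 */
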
static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
swp_entry_t folio_alloc_swap(struct folio *folio);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_folio_sector(struct folio *folio);

static inline void put_swap_device(struct swap_info_struct *si)
{
	percpu_ref_put(&si->users);
}

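/*
 * Usage sketch (illustrative): get_swap_device() pins the device behind a
 * swap entry via the users percpu_ref; drop the reference when done.
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *
 *	if (si) {
 *		// the entry's device cannot be swapped off in this window
 *		...
 *		put_swap_device(si);
 *	}
 */
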
#else /* CONFIG_SWAP */
static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
#define free_swap_and_cache(e) is_pfn_swap_entry(e)

static inline void free_swap_cache(struct page *page)
{
}

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline int swapcache_prepare(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

static inline bool folio_free_swap(struct folio *folio)
{
	return false;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
				  unsigned long start_page,
				  unsigned long nr_pages, sector_t start_block)
{
	return -EINVAL;
}
#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return READ_ONCE(vm_swappiness);

	/* root? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return READ_ONCE(vm_swappiness);

	return READ_ONCE(memcg->swappiness);
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return READ_ONCE(vm_swappiness);
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp);
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return;
	__folio_throttle_swaprate(folio, gfp);
}
#else
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
}
#endif

#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
		swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct folio *folio,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct folio *folio)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */