/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to. The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache. Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures. And that assumes that the architecture packs
 * the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
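
/*
 * Illustrative sketch (not part of this header): with the 5/27 split above
 * and a 32-bit pgoff_t, a swap entry carries a 5-bit type and a 27-bit page
 * offset. Using the helpers from <linux/swapops.h>, the round trip looks
 * roughly like this:
 *
 *	swp_entry_t entry = swp_entry(3, 0x1234);	// type 3, offset 0x1234
 *	unsigned int type = swp_type(entry);		// == 3
 *	pgoff_t offset    = swp_offset(entry);		// == 0x1234
 *
 * The largest representable offset is 2^27 - 1 pages, i.e. 512 GiB of swap
 * per swap type with 4 KiB pages, assuming the architecture packs the pte
 * the same way.
 */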

/*
 * Use some of the swap files numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/vm/hmm.rst. Short description is we need struct pages for
 * device memory that is unaddressable (inaccessible) by CPU, so that we can
 * migrate part of a process memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_* entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 2
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
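
/*
 * Worked example (illustration only): MAX_SWAPFILES_SHIFT == 5 gives 32
 * encodable types. On a kernel with CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION
 * and CONFIG_MEMORY_FAILURE all enabled, five types are reserved
 * (2 + 2 + 1), so MAX_SWAPFILES == 32 - 2 - 2 - 1 == 27 real swap areas.
 * In that configuration type 27 is SWP_HWPOISON, 28/29 are
 * SWP_MIGRATION_READ/WRITE and 30/31 are SWP_DEVICE_WRITE/READ, i.e. they
 * denote special entries rather than swap devices.
 */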

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks. A list of swap extents maps the entire swapfile. (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file; apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};
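
/*
 * Illustration (assumed example, not kernel code): an extent with
 * start_page == 100, nr_pages == 50 and start_block == 2000 says that
 * swapfile pages 100..149 live at disk blocks 2000..2049, so page P in that
 * range maps to block start_block + (P - start_page). A physically
 * contiguous swap partition needs only one extent; a fragmented swap file
 * needs one extent per contiguous run of blocks.
 */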

/*
 * Max bad pages in the new format..
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
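
/*
 * Worked example (assuming 4 KiB pages): info.badpages starts at byte
 * offset 1024 + 4 + 4 + 4 + 16 + 16 + 117 * 4 = 1536, and magic.magic
 * starts at PAGE_SIZE - 10 = 4086, so MAX_SWAP_BADPAGES is
 * (4086 - 1536) / 4 = 637 bad-page slots in the swap header.
 */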

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 10),	/* no overwrite of PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 11),	/* synchronous IO is efficient */
					/* add others here before... */
	SWP_SCANNING	= (1 << 12),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
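
/*
 * Example of the per-slot encoding above (illustration only): swap_map[i]
 * holds the map count of swap slot i in its low bits plus SWAP_HAS_CACHE in
 * bit 6. A slot referenced by two ptes and present in the swap cache is
 * 0x42 (SWAP_HAS_CACHE | 2). Once the count would exceed SWAP_MAP_MAX
 * (0x3e), COUNT_CONTINUED is set and the overflow is tracked in a
 * continuation page via add_swap_count_continuation().
 */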

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long that is naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * and swap_info_struct->swap_map
				 * elements corresponding to the swap
				 * cluster
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
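
/*
 * Reading the fields (sketch, assuming SWAPFILE_CLUSTER == 256 as in
 * mm/swapfile.c): for a cluster on the free list, flags has
 * CLUSTER_FLAG_FREE set and data is the index of the next free cluster,
 * with CLUSTER_FLAG_NEXT_NULL marking the end of the list. For an allocated
 * cluster, data counts how many of its 256 slots are in use; the cluster
 * can return to the free list only when that count drops to zero.
 */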

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entry from
 * its own cluster and swapout sequentially. The purpose is to optimize swapout
 * throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	struct plist_node avail_lists[MAX_NUMNODES];/* entry in swap_avail_heads */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. Other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. Changing flags needs
					 * holding this lock and swap_lock. If
					 * both locks must be held, take
					 * swap_lock first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
};

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING	5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING	3
#define SWAP_RA_PTE_CACHE_SIZE	(1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
#ifdef CONFIG_64BIT
	pte_t *ptes;
#else
	pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
void workingset_refault(struct page *page, void *shadow);
void workingset_activation(struct page *page);

/* Do not use directly, use workingset_lookup_update */
void workingset_update_node(struct radix_tree_node *node);

/* Returns workingset_update_node() if the mapping has shadow entries. */
#define workingset_lookup_update(mapping)				\
({									\
	radix_tree_update_node_t __helper = workingset_update_node;	\
	if (dax_mapping(mapping) || shmem_mapping(mapping))		\
		__helper = NULL;					\
	__helper;							\
})

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			 struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void lru_cache_add_active_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
#define node_reclaim_mode 0
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return 0;
}
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])
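
/*
 * Worked example (assuming 4 KiB pages): each address space covers
 * 1 << 14 = 16384 swap slots, i.e. 64 MiB of swap. A swap entry of type 1
 * with offset 0x12345 therefore uses the page-cache tree at
 * &swapper_spaces[1][0x12345 >> 14], i.e. the fifth address space
 * (index 4) of that swap device.
 */
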
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
				      struct vm_area_struct *vma,
				      unsigned long addr);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated);
extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
				struct vm_fault *vmf);
extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
				struct vm_fault *vmf);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
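
/*
 * Example (illustration only): with a 1 GiB swap device, total_swap_pages
 * is 262144 (4 KiB pages). nr_swap_pages counts the *free* slots, so once
 * fewer than 131072 slots remain free, free * 2 < total and vm_swap_full()
 * returns true, making reclaim drop swap-cache copies more aggressively.
 */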

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swap_count(struct swap_info_struct *si, swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);

#else /* CONFIG_SWAP */

static inline int swap_readpage(struct page *page, bool do_poll)
{
	return 0;
}

static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc can not include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swap_cluster_readahead(swp_entry_t entry,
				gfp_t gfp_mask, struct vm_fault *vmf)
{
	return NULL;
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_fault *vmf)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
				struct vm_area_struct *vma,
				unsigned long addr)
{
	return NULL;
}

static inline int add_to_swap(struct page *page)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

static inline int __swap_count(struct swap_info_struct *si, swp_entry_t entry)
{
	return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

#define reuse_swap_page(page, total_map_swapcount) \
	(page_trans_huge_mapcount(page, total_map_swapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root ? */
	if (mem_cgroup_disabled() || !memcg->css.parent)
		return vm_swappiness;

	return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
					 gfp_t gfp_mask);
#else
static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg,
						int node, gfp_t gfp_mask)
{
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */