Patch series "mm, swap: swap table phase III: remove swap_map", v3.
This series removes the static swap_map and uses the swap table for the
swap count directly. This saves about 30% memory usage for the static
swap metadata. For example, this saves 256MB of memory when mounting a
1TB swap device. Performance is slightly better too, since the double
update of the swap table and swap_map is now gone.
Test results:
Mounting a swap device:
=======================
Mount a 1TB brd device as SWAP, just to verify the memory save:
`free -m` before:
total used free shared buff/cache available
Mem: 1465 1051 417 1 61 413
Swap:        1054435           0     1054435
`free -m` after:
total used free shared buff/cache available
Mem: 1465 795 672 1 62 670
Swap:        1054435           0     1054435
Idle memory usage is reduced by ~256MB just as expected. And following
this design we should be able to save another ~512MB in the next phase.
Build kernel test:
==================
Test using ZSWAP with NVME SWAP, make -j48, defconfig, in a x86_64 VM
with 5G RAM, under global pressure, avg of 32 test run:
                Before        After:
System time: 1038.97s 1013.75s (-2.4%)
Test using ZRAM as SWAP, make -j12, tinyconfig, in a ARM64 VM with 1.5G
RAM, under global pressure, avg of 32 test run:
                Before        After:
System time: 67.75s 66.65s (-1.6%)
The result is slightly better.
Redis / Valkey benchmark:
=========================
Test using ZRAM as SWAP, in a ARM64 VM with 1.5G RAM, under global pressure,
avg of 64 test run:
Server: valkey-server --maxmemory 2560M
Client: redis-benchmark -r 3000000 -n 3000000 -d 1024 -c 12 -P 32 -t get
        no persistence        with BGSAVE
Before: 472705.71 RPS 369451.68 RPS
After: 481197.93 RPS (+1.8%) 374922.32 RPS (+1.5%)
In conclusion, performance is better in all cases, and memory usage is
much lower.
The swap cgroup array will also be merged into the swap table in a later
phase, saving the other ~60% part of the static swap metadata and making
all the swap metadata dynamic. The improved API for swap operations also
reduces the lock contention and makes more batching operations possible.
This patch (of 12):
/proc/swaps uses si->swap_map as the indicator to check if the swap
device is mounted. swap_map will be removed soon, so change it to use
si->swap_file instead because:
- si->swap_file is exactly the only dynamic content that /proc/swaps is
interested in. Previously, it was checking si->swap_map just to ensure
si->swap_file is available. si->swap_map is set under mutex
protection, and after si->swap_file is set, so having si->swap_map set
guarantees si->swap_file is set.
- Checking si->flags doesn't work here. SWP_WRITEOK is cleared during
swapoff, but /proc/swaps is supposed to show the device under swapoff
too to report the swapoff progress. And SWP_USED is set even if the
device hasn't been properly set up.
We can have another flag, but the easier way is to just check
si->swap_file directly. So protect the setting of si->swap_file with the
mutex, and set si->swap_file only when the swap device is truly enabled.
/proc/swaps is only interested in si->swap_file and a few static fields.
Only si->swap_file needs protection; reading the other static fields is
always fine.
Link: https://lkml.kernel.org/r/20260218-swap-table-p3-v3-0-f4e34be021a7@tencent.com
Link: https://lkml.kernel.org/r/20260218-swap-table-p3-v3-1-f4e34be021a7@tencent.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Acked-by: Chris Li <chrisl@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Kairui Song <ryncsn@gmail.com>
Cc: kernel test robot <lkp@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
static struct kmem_cache *swap_table_cachep;
+/* Protects si->swap_file for /proc/swaps usage */
static DEFINE_MUTEX(swapon_mutex);
static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
/*
* Free all of a swapdev's extent information
*/
-static void destroy_swap_extents(struct swap_info_struct *sis)
+static void destroy_swap_extents(struct swap_info_struct *sis,
+ struct file *swap_file)
{
while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
struct rb_node *rb = sis->swap_extent_root.rb_node;
}
if (sis->flags & SWP_ACTIVATED) {
- struct file *swap_file = sis->swap_file;
struct address_space *mapping = swap_file->f_mapping;
sis->flags &= ~SWP_ACTIVATED;
* Typically it is in the 1-4 megabyte range. So we can have hundreds of
* extents in the rbtree. - akpm.
*/
-static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
+static int setup_swap_extents(struct swap_info_struct *sis,
+ struct file *swap_file, sector_t *span)
{
- struct file *swap_file = sis->swap_file;
struct address_space *mapping = swap_file->f_mapping;
struct inode *inode = mapping->host;
int ret;
sis->flags |= SWP_ACTIVATED;
if ((sis->flags & SWP_FS_OPS) &&
sio_pool_init() != 0) {
- destroy_swap_extents(sis);
+ destroy_swap_extents(sis, swap_file);
return -ENOMEM;
}
return ret;
flush_work(&p->reclaim_work);
flush_percpu_swap_cluster(p);
- destroy_swap_extents(p);
+ destroy_swap_extents(p, p->swap_file);
if (p->flags & SWP_CONTINUED)
free_swap_count_continuations(p);
return SEQ_START_TOKEN;
for (type = 0; (si = swap_type_to_info(type)); type++) {
- if (!(si->flags & SWP_USED) || !si->swap_map)
+ if (!(si->swap_file))
continue;
if (!--l)
return si;
++(*pos);
for (; (si = swap_type_to_info(type)); type++) {
- if (!(si->flags & SWP_USED) || !si->swap_map)
+ if (!(si->swap_file))
continue;
return si;
}
goto bad_swap;
}
- si->swap_file = swap_file;
mapping = swap_file->f_mapping;
dentry = swap_file->f_path.dentry;
inode = mapping->host;
si->max = maxpages;
si->pages = maxpages - 1;
- nr_extents = setup_swap_extents(si, &span);
+ nr_extents = setup_swap_extents(si, swap_file, &span);
if (nr_extents < 0) {
error = nr_extents;
goto bad_swap_unlock_inode;
prio = DEF_SWAP_PRIO;
if (swap_flags & SWAP_FLAG_PREFER)
prio = swap_flags & SWAP_FLAG_PRIO_MASK;
+
+ si->swap_file = swap_file;
enable_swap_info(si, prio, swap_map, cluster_info, zeromap);
pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s\n",
kfree(si->global_cluster);
si->global_cluster = NULL;
inode = NULL;
- destroy_swap_extents(si);
+ destroy_swap_extents(si, swap_file);
swap_cgroup_swapoff(si->type);
spin_lock(&swap_lock);
- si->swap_file = NULL;
si->flags = 0;
spin_unlock(&swap_lock);
vfree(swap_map);