git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
selftests/mm: reuse FORCE_READ to replace "asm volatile("" : "+r" (XXX));"
author wang lian <lianux.mm@gmail.com>
Thu, 17 Jul 2025 13:18:56 +0000 (21:18 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 25 Jul 2025 02:12:41 +0000 (19:12 -0700)
Patch series "selftests/mm: reuse FORCE_READ to replace "asm volatile("" :
"+r" (XXX));" and some cleanup", v2.

This series introduces a common FORCE_READ() macro to replace the cryptic
asm volatile("" : "+r" (variable)); construct used in several mm
selftests.  This improves code readability and maintainability by removing
duplicated, hard-to-understand code.

This patch (of 2):

Several mm selftests use the `asm volatile("" : "+r" (variable));`
construct to force a read of a variable, preventing the compiler from
optimizing away the memory access.  This idiom is cryptic and duplicated
across multiple test files.

Following a suggestion from David [1], this patch refactors this common
pattern into a FORCE_READ() macro.
Link: https://lkml.kernel.org/r/20250717131857.59909-1-lianux.mm@gmail.com
Link: https://lkml.kernel.org/r/20250717131857.59909-2-lianux.mm@gmail.com
Link: https://lore.kernel.org/lkml/4a3e0759-caa1-4cfa-bc3f-402593f1eee3@redhat.com/
Signed-off-by: wang lian <lianux.mm@gmail.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Jann Horn <jannh@google.com>
Cc: Kairui Song <ryncsn@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
tools/testing/selftests/mm/cow.c
tools/testing/selftests/mm/guard-regions.c
tools/testing/selftests/mm/hugetlb-madvise.c
tools/testing/selftests/mm/migration.c
tools/testing/selftests/mm/pagemap_ioctl.c
tools/testing/selftests/mm/split_huge_page_test.c
tools/testing/selftests/mm/vm_util.h

index 788e82b00b755431bfd71eec65d827c99c1c51b6..d30625c18259b99aeb0edad782940b30f1babb5e 100644 (file)
@@ -1534,7 +1534,7 @@ static void test_ro_fast_pin(char *mem, const char *smem, size_t size)
 
 static void run_with_zeropage(non_anon_test_fn fn, const char *desc)
 {
-       char *mem, *smem, tmp;
+       char *mem, *smem;
 
        log_test_start("%s ... with shared zeropage", desc);
 
@@ -1554,8 +1554,8 @@ static void run_with_zeropage(non_anon_test_fn fn, const char *desc)
        }
 
        /* Read from the page to populate the shared zeropage. */
-       tmp = *mem + *smem;
-       asm volatile("" : "+r" (tmp));
+       FORCE_READ(mem);
+       FORCE_READ(smem);
 
        fn(mem, smem, pagesize);
 munmap:
@@ -1566,7 +1566,7 @@ munmap:
 
 static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc)
 {
-       char *mem, *smem, *mmap_mem, *mmap_smem, tmp;
+       char *mem, *smem, *mmap_mem, *mmap_smem;
        size_t mmap_size;
        int ret;
 
@@ -1617,8 +1617,8 @@ static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc)
         * the first sub-page and test if we get another sub-page populated
         * automatically.
         */
-       tmp = *mem + *smem;
-       asm volatile("" : "+r" (tmp));
+       FORCE_READ(mem);
+       FORCE_READ(smem);
        if (!pagemap_is_populated(pagemap_fd, mem + pagesize) ||
            !pagemap_is_populated(pagemap_fd, smem + pagesize)) {
                ksft_test_result_skip("Did not get THPs populated\n");
@@ -1634,7 +1634,7 @@ munmap:
 
 static void run_with_memfd(non_anon_test_fn fn, const char *desc)
 {
-       char *mem, *smem, tmp;
+       char *mem, *smem;
        int fd;
 
        log_test_start("%s ... with memfd", desc);
@@ -1668,8 +1668,8 @@ static void run_with_memfd(non_anon_test_fn fn, const char *desc)
        }
 
        /* Fault the page in. */
-       tmp = *mem + *smem;
-       asm volatile("" : "+r" (tmp));
+       FORCE_READ(mem);
+       FORCE_READ(smem);
 
        fn(mem, smem, pagesize);
 munmap:
@@ -1682,7 +1682,7 @@ close:
 
 static void run_with_tmpfile(non_anon_test_fn fn, const char *desc)
 {
-       char *mem, *smem, tmp;
+       char *mem, *smem;
        FILE *file;
        int fd;
 
@@ -1724,8 +1724,8 @@ static void run_with_tmpfile(non_anon_test_fn fn, const char *desc)
        }
 
        /* Fault the page in. */
-       tmp = *mem + *smem;
-       asm volatile("" : "+r" (tmp));
+       FORCE_READ(mem);
+       FORCE_READ(smem);
 
        fn(mem, smem, pagesize);
 munmap:
@@ -1740,7 +1740,7 @@ static void run_with_memfd_hugetlb(non_anon_test_fn fn, const char *desc,
                                   size_t hugetlbsize)
 {
        int flags = MFD_HUGETLB;
-       char *mem, *smem, tmp;
+       char *mem, *smem;
        int fd;
 
        log_test_start("%s ... with memfd hugetlb (%zu kB)", desc,
@@ -1778,8 +1778,8 @@ static void run_with_memfd_hugetlb(non_anon_test_fn fn, const char *desc,
        }
 
        /* Fault the page in. */
-       tmp = *mem + *smem;
-       asm volatile("" : "+r" (tmp));
+       FORCE_READ(mem);
+       FORCE_READ(smem);
 
        fn(mem, smem, hugetlbsize);
 munmap:
index 93af3d3760f987a2620e87ce5e567912e3dd62cc..4b76e72e70531c94fdbacc64d5195678839de66e 100644 (file)
 static volatile sig_atomic_t signal_jump_set;
 static sigjmp_buf signal_jmp_buf;
 
-/*
- * Ignore the checkpatch warning, we must read from x but don't want to do
- * anything with it in order to trigger a read page fault. We therefore must use
- * volatile to stop the compiler from optimising this away.
- */
-#define FORCE_READ(x) (*(volatile typeof(x) *)x)
-
 /*
  * How is the test backing the mapping being tested?
  */
index e74107185324fb8dfd768cba25218ab469612fec..1afe14b9dc0c34a5e12ec7af67bdd745795a3ec7 100644 (file)
@@ -47,14 +47,11 @@ void write_fault_pages(void *addr, unsigned long nr_pages)
 
 void read_fault_pages(void *addr, unsigned long nr_pages)
 {
-       volatile unsigned long dummy = 0;
        unsigned long i;
 
        for (i = 0; i < nr_pages; i++) {
-               dummy += *((unsigned long *)(addr + (i * huge_page_size)));
-
                /* Prevent the compiler from optimizing out the entire loop: */
-               asm volatile("" : "+r" (dummy));
+               FORCE_READ(((unsigned long *)(addr + (i * huge_page_size))));
        }
 }
 
index a306f8bab0870019092a64c15eb7b32bf6c919bc..c5a73617796aeb6f99fd52889447149366d7ce28 100644 (file)
@@ -16,6 +16,7 @@
 #include <sys/types.h>
 #include <signal.h>
 #include <time.h>
+#include "vm_util.h"
 
 #define TWOMEG         (2<<20)
 #define RUNTIME                (20)
@@ -103,15 +104,13 @@ int migrate(uint64_t *ptr, int n1, int n2)
 
 void *access_mem(void *ptr)
 {
-       volatile uint64_t y = 0;
-       volatile uint64_t *x = ptr;
-
        while (1) {
                pthread_testcancel();
-               y += *x;
-
-               /* Prevent the compiler from optimizing out the writes to y: */
-               asm volatile("" : "+r" (y));
+               /* Force a read from the memory pointed to by ptr. This ensures
+                * the memory access actually happens and prevents the compiler
+                * from optimizing away this entire loop.
+                */
+               FORCE_READ((uint64_t *)ptr);
        }
 
        return NULL;
index c2dcda78ad31c424482146d29144ec262f0bb5cf..0d4209eef0c3d4c5ea5365ea9c8f08fd7c4643d3 100644 (file)
@@ -1525,9 +1525,7 @@ void zeropfn_tests(void)
 
        ret = madvise(mem, hpage_size, MADV_HUGEPAGE);
        if (!ret) {
-               char tmp = *mem;
-
-               asm volatile("" : "+r" (tmp));
+               FORCE_READ(mem);
 
                ret = pagemap_ioctl(mem, hpage_size, &vec, 1, 0,
                                    0, PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO);
index aa7400ed0e99468cd6a047e8ab29cbb8bd100419..6640748ff221bb028a4ded00ef6ff2fb962b2833 100644 (file)
@@ -398,7 +398,6 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
                char **addr)
 {
        size_t i;
-       int dummy = 0;
        unsigned char buf[1024];
 
        srand(time(NULL));
@@ -440,8 +439,7 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
        madvise(*addr, fd_size, MADV_HUGEPAGE);
 
        for (size_t i = 0; i < fd_size; i++)
-               dummy += *(*addr + i);
-       asm volatile("" : "+r" (dummy));
+               FORCE_READ((*addr + i));
 
        if (!check_huge_file(*addr, fd_size / pmd_pagesize, pmd_pagesize)) {
                ksft_print_msg("No large pagecache folio generated, please provide a filesystem supporting large folio\n");
index 2b154c2875915ee9c8755059333606b612756230..c20298ae98ea5c5395da66c59e43a5c3b64c762d 100644 (file)
 #define PM_SWAP                       BIT_ULL(62)
 #define PM_PRESENT                    BIT_ULL(63)
 
+/*
+ * Ignore the checkpatch warning, we must read from x but don't want to do
+ * anything with it in order to trigger a read page fault. We therefore must use
+ * volatile to stop the compiler from optimising this away.
+ */
+#define FORCE_READ(x) (*(volatile typeof(x) *)x)
+
 extern unsigned int __page_size;
 extern unsigned int __page_shift;