selftests: riscv: verify syscalls discard vector context
author     Sergey Matyukevich <geomatsi@gmail.com>
           Mon, 26 Jan 2026 04:09:57 +0000 (21:09 -0700)
committer  Paul Walmsley <pjw@kernel.org>
           Mon, 9 Feb 2026 22:27:33 +0000 (15:27 -0700)
Add a test to the v_ptrace test suite to verify that the vector CSR
registers are clobbered on syscalls.

Signed-off-by: Sergey Matyukevich <geomatsi@gmail.com>
Reviewed-by: Andy Chiu <andybnac@gmail.com>
Tested-by: Andy Chiu <andybnac@gmail.com>
Link: https://patch.msgid.link/20251214163537.1054292-8-geomatsi@gmail.com
[pjw@kernel.org: cleaned up a checkpatch issue]
Signed-off-by: Paul Walmsley <pjw@kernel.org>
tools/testing/selftests/riscv/vector/validate_v_ptrace.c

index 768ef93b33dae7d65cdb872ec91749411966224b..7ff9a0cf229c0fc1cb8a4608a9a00c9412276662 100644 (file)
@@ -211,4 +211,151 @@ TEST(ptrace_v_early_debug)
        }
 }
 
+TEST(ptrace_v_syscall_clobbering)
+{
+       pid_t pid;
+
+       if (!is_vector_supported() && !is_xtheadvector_supported())
+               SKIP(return, "Vector not supported");
+
+       chld_lock = 1;
+       pid = fork();
+       ASSERT_LE(0, pid)
+               TH_LOG("fork: %m");
+
+       if (pid == 0) {
+               unsigned long vl;
+
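+               /* spin until the tracer clears chld_lock via PTRACE_POKEDATA */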
+               while (chld_lock == 1)
+                       asm volatile("" : : "g"(chld_lock) : "memory");
+
+               if (is_xtheadvector_supported()) {
+                       asm volatile (
+                               // 0 | zimm[10:0] | rs1 | 1 1 1 | rd |1010111| vsetvli
+                               // vsetvli      t4, x0, e16, m2, d1
+                               ".4byte         0b00000000010100000111111011010111\n"
+                               "mv             %[new_vl], t4\n"
+                               : [new_vl] "=r" (vl) : : "t4");
+               } else {
+                       asm volatile (
+                               ".option push\n"
+                               ".option arch, +zve32x\n"
+                               "vsetvli %[new_vl], x0, e16, m2, tu, mu\n"
+                               ".option pop\n"
+                               : [new_vl] "=r"(vl) : : );
+               }
+
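+               /*
+                * Each iteration first traps to the tracer at the ebreak,
+                * then enters the kernel through the sleep(0) syscall,
+                * which is expected to discard the vector context.
+                */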
+               while (1) {
+                       asm volatile (
+                               ".option push\n"
+                               ".option norvc\n"
+                               "ebreak\n"
+                               ".option pop\n");
+
+                       sleep(0);
+               }
+       } else {
+               struct __riscv_v_regset_state *regset_data;
+               unsigned long vlenb = get_vr_len();
+               struct user_regs_struct regs;
+               size_t regset_size;
+               struct iovec iov;
+               int status;
+
+               /* attach */
+
+               ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL));
+               ASSERT_EQ(pid, waitpid(pid, &status, 0));
+               ASSERT_TRUE(WIFSTOPPED(status));
+
+               /* unlock */
+
+               ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0));
+
+               /* resume and wait for the 1st ebreak */
+
+               ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
+               ASSERT_EQ(pid, waitpid(pid, &status, 0));
+               ASSERT_TRUE(WIFSTOPPED(status));
+
+               /* read tracee vector csr regs using ptrace GETREGSET */
+
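+               /*
+                * The NT_RISCV_VECTOR regset payload is the CSR header
+                * followed by the 32 vector registers of vlenb bytes each.
+                */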
+               regset_size = sizeof(*regset_data) + vlenb * 32;
+               regset_data = calloc(1, regset_size);
+               ASSERT_NE(NULL, regset_data);
+
+               iov.iov_base = regset_data;
+               iov.iov_len = regset_size;
+
+               ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));
+
+               /*
+                * Verify the initial vsetvli settings: vtype 5 encodes
+                * e16/m2 in the xtheadvector layout, vtype 9 encodes
+                * e16/m2/tu/mu in the ratified encoding.  With e16/m2,
+                * VLMAX = (VLEN / SEW) * LMUL = vlenb, so vl equals vlenb.
+                */
+
+               if (is_xtheadvector_supported())
+                       EXPECT_EQ(5UL, regset_data->vtype);
+               else
+                       EXPECT_EQ(9UL, regset_data->vtype);
+
+               EXPECT_EQ(regset_data->vlenb, regset_data->vl);
+               EXPECT_EQ(vlenb, regset_data->vlenb);
+               EXPECT_EQ(0UL, regset_data->vstart);
+               EXPECT_EQ(0UL, regset_data->vcsr);
+
+               /*
+                * Skip over the 4-byte (.option norvc) ebreak, then resume
+                * and wait for the 2nd ebreak after the sleep(0) syscall.
+                */
+
+               iov.iov_base = &regs;
+               iov.iov_len = sizeof(regs);
+
+               ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov));
+               regs.pc += 4;
+               ASSERT_EQ(0, ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov));
+
+               ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
+               ASSERT_EQ(pid, waitpid(pid, &status, 0));
+               ASSERT_TRUE(WIFSTOPPED(status));
+
+               /* read tracee vtype using ptrace GETREGSET */
+
+               iov.iov_base = regset_data;
+               iov.iov_len = regset_size;
+
+               ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));
+
+               /*
+                * Verify that the V state was discarded by the syscall:
+                * vill (the MSB of vtype) is set, vstart/vcsr/vl are
+                * cleared, and vlenb is unchanged.
+                */
+
+               EXPECT_EQ((1UL << (__riscv_xlen - 1)), regset_data->vtype);
+               EXPECT_EQ(vlenb, regset_data->vlenb);
+               EXPECT_EQ(0UL, regset_data->vstart);
+               EXPECT_EQ(0UL, regset_data->vcsr);
+               EXPECT_EQ(0UL, regset_data->vl);
+
+               /* cleanup */
+
+               free(regset_data);
+               ASSERT_EQ(0, kill(pid, SIGKILL));
+       }
+}
+
 TEST_HARNESS_MAIN
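
With this test in place, the suite can be exercised through the kselftest
framework, e.g. (assuming a riscv64 build and test environment; the exact
invocation depends on the tree's selftest setup):

    make -C tools/testing/selftests TARGETS=riscv run_tests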