riscv: Split out measure_cycles() for reuse
author Nam Cao <namcao@linutronix.de>
Sat, 4 Apr 2026 01:28:47 +0000 (19:28 -0600)
committer Paul Walmsley <pjw@kernel.org>
Sun, 5 Apr 2026 00:42:44 +0000 (18:42 -0600)
The byte-cycle and word-cycle measurements of scalar misaligned access
are very similar. Split the shared logic out into a common
measure_cycles() function to avoid duplication.

This function will also be reused for the vector misaligned access
probe in a follow-up commit.

Signed-off-by: Nam Cao <namcao@linutronix.de>
Link: https://patch.msgid.link/50d0598e45acc56c95176e52fbbe56e1f4becc84.1770830596.git.namcao@linutronix.de
Signed-off-by: Paul Walmsley <pjw@kernel.org>
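
For readers outside the kernel tree, here is a minimal userspace sketch of
the timing technique that measure_cycles() factors out: warm up once, then
time individual calls for a fixed window and keep the minimum, so that
interrupts and scheduler noise only inflate the discarded samples. It
substitutes POSIX clock_gettime(CLOCK_MONOTONIC) for the kernel's
get_cycles64()/ktime_get_mono_fast_ns(), uses a hypothetical
SAMPLE_WINDOW_NS in place of MISALIGNED_ACCESS_NS, and omits the
preempt_disable()/mb() pairs that only apply in kernel context.

#include <stddef.h>
#include <stdint.h>
#include <time.h>

#define SAMPLE_WINDOW_NS 100000000ULL /* hypothetical stand-in for MISALIGNED_ACCESS_NS */

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/*
 * Userspace analogue of the patch's measure_cycles(): run func() once as
 * a warmup, then time it repeatedly for a fixed window and return the
 * best (minimum) duration observed.
 */
static uint64_t measure_ns(void (*func)(void *dst, const void *src, size_t len),
			   void *dst, const void *src, size_t len)
{
	uint64_t best = UINT64_MAX;
	uint64_t deadline;

	/* Warmup, mirroring the patch. */
	func(dst, src, len);

	deadline = now_ns() + SAMPLE_WINDOW_NS;
	while (now_ns() < deadline) {
		uint64_t start, elapsed;

		start = now_ns();
		func(dst, src, len);
		elapsed = now_ns() - start;
		if (elapsed < best)
			best = elapsed;
	}
	return best;
}
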
arch/riscv/kernel/unaligned_access_speed.c

index 1f4c128d739662ee1dbc0430462726c0675ce333..31b431dd5d01ece068598cc67496a1a3dbb2c6d2 100644
@@ -31,30 +31,15 @@ static long unaligned_vector_speed_param = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNO
 static cpumask_t fast_misaligned_access;
 
 #ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
-static int check_unaligned_access(void *param)
+static u64 measure_cycles(void (*func)(void *dst, const void *src, size_t len),
+                         void *dst, void *src, size_t len)
 {
-       int cpu = smp_processor_id();
-       u64 start_cycles, end_cycles;
-       u64 word_cycles;
-       u64 byte_cycles;
+       u64 start_cycles, end_cycles, cycles = -1ULL;
        u64 start_ns;
-       int ratio;
-       struct page *page = param;
-       void *dst;
-       void *src;
-       long speed = RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
 
-       if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN)
-               return 0;
-
-       /* Make an unaligned destination buffer. */
-       dst = (void *)((unsigned long)page_address(page) | 0x1);
-       /* Unalign src as well, but differently (off by 1 + 2 = 3). */
-       src = dst + (MISALIGNED_BUFFER_SIZE / 2);
-       src += 2;
-       word_cycles = -1ULL;
        /* Do a warmup. */
-       __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+       func(dst, src, len);
+
        preempt_disable();
 
        /*
@@ -66,29 +51,41 @@ static int check_unaligned_access(void *param)
                start_cycles = get_cycles64();
                /* Ensure the CSR read can't reorder WRT to the copy. */
                mb();
-               __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+               func(dst, src, len);
                /* Ensure the copy ends before the end time is snapped. */
                mb();
                end_cycles = get_cycles64();
-               if ((end_cycles - start_cycles) < word_cycles)
-                       word_cycles = end_cycles - start_cycles;
+               if ((end_cycles - start_cycles) < cycles)
+                       cycles = end_cycles - start_cycles;
        }
 
-       byte_cycles = -1ULL;
-       __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+       preempt_enable();
 
-       start_ns = ktime_get_mono_fast_ns();
-       while (ktime_get_mono_fast_ns() < start_ns + MISALIGNED_ACCESS_NS) {
-               start_cycles = get_cycles64();
-               mb();
-               __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
-               mb();
-               end_cycles = get_cycles64();
-               if ((end_cycles - start_cycles) < byte_cycles)
-                       byte_cycles = end_cycles - start_cycles;
-       }
+       return cycles;
+}
 
-       preempt_enable();
+static int check_unaligned_access(void *param)
+{
+       int cpu = smp_processor_id();
+       u64 word_cycles;
+       u64 byte_cycles;
+       int ratio;
+       struct page *page = param;
+       void *dst;
+       void *src;
+       long speed = RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
+
+       if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN)
+               return 0;
+
+       /* Make an unaligned destination buffer. */
+       dst = (void *)((unsigned long)page_address(page) | 0x1);
+       /* Unalign src as well, but differently (off by 1 + 2 = 3). */
+       src = dst + (MISALIGNED_BUFFER_SIZE / 2);
+       src += 2;
+
+       word_cycles = measure_cycles(__riscv_copy_words_unaligned, dst, src, MISALIGNED_COPY_SIZE);
+       byte_cycles = measure_cycles(__riscv_copy_bytes_unaligned, dst, src, MISALIGNED_COPY_SIZE);
 
        /* Don't divide by zero. */
        if (!word_cycles || !byte_cycles) {
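
As a rough illustration of the reuse promised for the follow-up, a vector
probe could call the new helper the same way. The __riscv_copy_vec_* names
below are an assumption mirroring the kernel's existing vector copy
helpers; the actual follow-up commit is not shown here:

	/* Hypothetical follow-up usage for the vector misaligned probe. */
	word_cycles = measure_cycles(__riscv_copy_vec_words_unaligned, dst, src, MISALIGNED_COPY_SIZE);
	byte_cycles = measure_cycles(__riscv_copy_vec_bytes_unaligned, dst, src, MISALIGNED_COPY_SIZE);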