// SPDX-License-Identifier: GPL-2.0-only
/*
 * FP/SIMD context switching and fault handling
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/swab.h>

#include <asm/esr.h>
#include <asm/fpsimd.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/processor.h>
#include <asm/simd.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/virt.h>

#define FPEXC_IOF	(1 << 0)
#define FPEXC_DZF	(1 << 1)
#define FPEXC_OFF	(1 << 2)
#define FPEXC_UFF	(1 << 3)
#define FPEXC_IXF	(1 << 4)
#define FPEXC_IDF	(1 << 7)
/*
 * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
 *
 * In order to reduce the number of times the FPSIMD state is needlessly saved
 * and restored, we need to keep track of two things:
 * (a) for each task, we need to remember which CPU was the last one to have
 *     the task's FPSIMD state loaded into its FPSIMD registers;
 * (b) for each CPU, we need to remember which task's userland FPSIMD state has
 *     been loaded into its FPSIMD registers most recently, or whether it has
 *     been used to perform kernel mode NEON in the meantime.
 *
 * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to
 * the id of the current CPU every time the state is loaded onto a CPU. For (b),
 * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
 * address of the userland FPSIMD state of the task that was loaded onto the CPU
 * most recently, or NULL if kernel mode NEON has been performed after that.
 *
 * With this in place, we no longer have to restore the next FPSIMD state right
 * when switching between tasks. Instead, we can defer this check to userland
 * resume, at which time we verify whether the CPU's fpsimd_last_state and the
 * task's fpsimd_cpu are still mutually in sync. If this is the case, we
 * can omit the FPSIMD restore.
 *
 * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
 * indicate whether or not the userland FPSIMD state of the current task is
 * present in the registers. The flag is set unless the FPSIMD registers of this
 * CPU currently contain the most recent userland FPSIMD state of the current
 * task.
 *
 * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
 * save the task's FPSIMD context back to task_struct from softirq context.
 * To prevent this from racing with the manipulation of the task's FPSIMD state
 * from task context and thereby corrupting the state, it is necessary to
 * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
 * flag with local_bh_disable() unless softirqs are already masked.
 *
 * For a certain task, the sequence may look something like this:
 * - the task gets scheduled in; if both the task's fpsimd_cpu field
 *   contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
 *   variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
 *   cleared, otherwise it is set;
 *
 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
 *   userland FPSIMD state is copied from memory to the registers, the task's
 *   fpsimd_cpu field is set to the id of the current CPU, the current
 *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
 *   TIF_FOREIGN_FPSTATE flag is cleared;
 *
 * - the task executes an ordinary syscall; upon return to userland, the
 *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
 *   restored;
 *
 * - the task executes a syscall which executes some NEON instructions; this is
 *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
 *   register contents to memory, clears the fpsimd_last_state per-cpu variable
 *   and sets the TIF_FOREIGN_FPSTATE flag;
 *
 * - the task gets preempted after kernel_neon_end() is called; as we have not
 *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
 *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
 */
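/*
 * Illustrative sketch (not a definition from this file): on the return-to-
 * userland path, the deferred check described above boils down to the
 * following shape, as implemented by fpsimd_restore_current_state() below:
 *
 *	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
 *		task_fpsimd_load();		// reload from thread_struct
 *		fpsimd_bind_task_to_cpu();	// re-establish (a) and (b)
 *	}
 *	// otherwise the registers already hold current's state: nothing to do
 */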
struct fpsimd_last_state_struct {
	struct user_fpsimd_state *st;
	void *sve_state;
	unsigned int sve_vl;
};

static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);

/* Default VL for tasks that don't set it explicitly: */
static int sve_default_vl = -1;
#ifdef CONFIG_ARM64_SVE

/* Maximum supported vector length across all CPUs (initially poisoned) */
int __ro_after_init sve_max_vl = SVE_VL_MIN;
int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;

/*
 * Set of available vector lengths,
 * where length vq is encoded as bit __vq_to_bit(vq):
 */
__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
/* Set of vector lengths present on at least one cpu: */
static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);

static void __percpu *efi_sve_state;

#else /* ! CONFIG_ARM64_SVE */

/* Dummy declarations for code that will be optimised out: */
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
extern void __percpu *efi_sve_state;

#endif /* ! CONFIG_ARM64_SVE */
/*
 * Call __sve_free() directly only if you know task can't be scheduled
 * or preempted.
 */
static void __sve_free(struct task_struct *task)
{
	kfree(task->thread.sve_state);
	task->thread.sve_state = NULL;
}

static void sve_free(struct task_struct *task)
{
	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));

	__sve_free(task);
}
/*
 * TIF_SVE controls whether a task can use SVE without trapping while
 * in userspace, and also the way a task's FPSIMD/SVE state is stored
 * in thread_struct.
 *
 * The kernel uses this flag to track whether a user task is actively
 * using SVE, and therefore whether full SVE register state needs to
 * be tracked. If not, the cheaper FPSIMD context handling code can
 * be used instead of the more costly SVE equivalents.
 *
 *  * TIF_SVE set:
 *
 *    The task can execute SVE instructions while in userspace without
 *    trapping to the kernel.
 *
 *    When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
 *    corresponding Zn), P0-P15 and FFR are encoded in
 *    task->thread.sve_state, formatted appropriately for vector
 *    length task->thread.sve_vl.
 *
 *    task->thread.sve_state must point to a valid buffer at least
 *    sve_state_size(task) bytes in size.
 *
 *    During any syscall, the kernel may optionally clear TIF_SVE and
 *    discard the vector state except for the FPSIMD subset.
 *
 *  * TIF_SVE clear:
 *
 *    An attempt by the user task to execute an SVE instruction causes
 *    do_sve_acc() to be called, which does some preparation and then
 *    sets TIF_SVE.
 *
 *    When stored, FPSIMD registers V0-V31 are encoded in
 *    task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
 *    logically zero but not stored anywhere; P0-P15 and FFR are not
 *    stored and have unspecified values from userspace's point of
 *    view. For hygiene purposes, the kernel zeroes them on next use,
 *    but userspace is discouraged from relying on this.
 *
 *    task->thread.sve_state does not need to be non-NULL, valid or any
 *    particular size: it must not be dereferenced.
 *
 *  * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
 *    irrespective of whether TIF_SVE is clear or set, since these are
 *    not vector length dependent.
 */
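/*
 * Illustrative sketch (not part of the original code): given the rules
 * above, reading V0 for a stopped task means picking the authoritative
 * storage based on TIF_SVE. ZREG() and arm64_le128_to_cpu() are the
 * helpers defined further down in this file:
 *
 *	__uint128_t read_task_v0(struct task_struct *task)
 *	{
 *		if (test_tsk_thread_flag(task, TIF_SVE)) {
 *			unsigned int vq = sve_vq_from_vl(task->thread.sve_vl);
 *
 *			// Vn occupies bits [127:0] of Zn in sve_state
 *			return arm64_le128_to_cpu(
 *				*(__uint128_t *)ZREG(task->thread.sve_state,
 *						     vq, 0));
 *		}
 *
 *		return task->thread.uw.fpsimd_state.vregs[0];
 *	}
 */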
/*
 * Update current's FPSIMD/SVE registers from thread_struct.
 *
 * This function should be called only when the FPSIMD/SVE state in
 * thread_struct is known to be up to date, when preparing to enter
 * userspace.
 *
 * Softirqs (and preemption) must be disabled.
 */
static void task_fpsimd_load(void)
{
	WARN_ON(!in_softirq() && !irqs_disabled());

	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		sve_load_state(sve_pffr(&current->thread),
			       &current->thread.uw.fpsimd_state.fpsr,
			       sve_vq_from_vl(current->thread.sve_vl) - 1);
	else
		fpsimd_load_state(&current->thread.uw.fpsimd_state);
}
/*
 * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
 * date with respect to the CPU registers.
 *
 * Softirqs (and preemption) must be disabled.
 */
void fpsimd_save(void)
{
	struct fpsimd_last_state_struct const *last =
		this_cpu_ptr(&fpsimd_last_state);
	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */

	WARN_ON(!in_softirq() && !irqs_disabled());

	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
			if (WARN_ON(sve_get_vl() != last->sve_vl)) {
				/*
				 * Can't save the user regs, so current would
				 * re-enter user with corrupt state.
				 * There's no way to recover, so kill it:
				 */
				force_signal_inject(SIGKILL, SI_KERNEL, 0);
				return;
			}

			sve_save_state((char *)last->sve_state +
				       sve_ffr_offset(last->sve_vl),
				       &last->st->fpsr);
		} else
			fpsimd_save_state(last->st);
	}
}
/*
 * All vector length selection from userspace comes through here.
 * We're on a slow path, so some sanity-checks are included.
 * If things go wrong there's a bug somewhere, but try to fall back to a
 * safe choice.
 */
static unsigned int find_supported_vector_length(unsigned int vl)
{
	int bit;
	int max_vl = sve_max_vl;

	if (WARN_ON(!sve_vl_valid(vl)))
		vl = SVE_VL_MIN;

	if (WARN_ON(!sve_vl_valid(max_vl)))
		max_vl = SVE_VL_MIN;

	if (vl > max_vl)
		vl = max_vl;

	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
			    __vq_to_bit(sve_vq_from_vl(vl)));
	return sve_vl_from_vq(__bit_to_vq(bit));
}
#ifdef CONFIG_SYSCTL

static int sve_proc_do_default_vl(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos)
{
	int ret;
	int vl = sve_default_vl;
	struct ctl_table tmp_table = {
		.data = &vl,
		.maxlen = sizeof(vl),
	};

	ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/* Writing -1 has the special meaning "set to max": */
	if (vl == -1)
		vl = sve_max_vl;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	sve_default_vl = find_supported_vector_length(vl);
	return 0;
}

static struct ctl_table sve_default_vl_table[] = {
	{
		.procname	= "sve_default_vector_length",
		.mode		= 0644,
		.proc_handler	= sve_proc_do_default_vl,
	},
	{ }
};

static int __init sve_sysctl_init(void)
{
	if (system_supports_sve())
		if (!register_sysctl("abi", sve_default_vl_table))
			return -EINVAL;

	return 0;
}

#else /* ! CONFIG_SYSCTL */
static int __init sve_sysctl_init(void) { return 0; }
#endif /* ! CONFIG_SYSCTL */
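/*
 * Usage sketch (illustrative, not part of the original file): with
 * CONFIG_SYSCTL enabled, the handler above is reachable at
 * /proc/sys/abi/sve_default_vector_length, e.g.:
 *
 *	# echo 32 > /proc/sys/abi/sve_default_vector_length
 *	# echo -1 > /proc/sys/abi/sve_default_vector_length	(set to max)
 */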
#define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))

#ifdef CONFIG_CPU_BIG_ENDIAN
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	u64 a = swab64(x);
	u64 b = swab64(x >> 64);

	return ((__uint128_t)a << 64) | b;
}
#else
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	return x;
}
#endif

#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
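/*
 * Worked example (illustrative): the Zn slots in sve_state are contiguous
 * and SVE_SIG_ZREG_SIZE(vq) is vq * 16 bytes each, so ZREG(sst, vq, n)
 * reduces to sst + n * vq * 16. For VL 32 (vq == 2), Z3 therefore starts
 * at byte offset 96 within sve_state.
 */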
/*
 * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
 * task->thread.sve_state.
 *
 * Task can be a non-runnable task, or current. In the latter case,
 * softirqs (and preemption) must be disabled.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must be up to date before calling this
 * function.
 */
static void fpsimd_to_sve(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;
	__uint128_t *p;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	for (i = 0; i < 32; ++i) {
		p = (__uint128_t *)ZREG(sst, vq, i);
		*p = arm64_cpu_to_le128(fst->vregs[i]);
	}
}
/*
 * Transfer the SVE state in task->thread.sve_state to
 * task->thread.uw.fpsimd_state.
 *
 * Task can be a non-runnable task, or current. In the latter case,
 * softirqs (and preemption) must be disabled.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.sve_state must be up to date before calling this function.
 */
static void sve_to_fpsimd(struct task_struct *task)
{
	unsigned int vq;
	void const *sst = task->thread.sve_state;
	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;
	__uint128_t const *p;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	for (i = 0; i < 32; ++i) {
		p = (__uint128_t const *)ZREG(sst, vq, i);
		fst->vregs[i] = arm64_le128_to_cpu(*p);
	}
}
#ifdef CONFIG_ARM64_SVE

/*
 * Return how many bytes of memory are required to store the full SVE
 * state for task, given task's currently configured vector length.
 */
size_t sve_state_size(struct task_struct const *task)
{
	return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl));
}

/*
 * Ensure that task->thread.sve_state is allocated and sufficiently large.
 *
 * This function should be used only in preparation for replacing
 * task->thread.sve_state with new data. The memory is always zeroed
 * here to prevent stale data from showing through: this is done in
 * the interest of testability and predictability: except in the
 * do_sve_acc() case, there is no ABI requirement to hide stale data
 * written previously by task.
 */
void sve_alloc(struct task_struct *task)
{
	if (task->thread.sve_state) {
		memset(task->thread.sve_state, 0, sve_state_size(task));
		return;
	}

	/* This is a small allocation (maximum ~8KB) and Should Not Fail. */
	task->thread.sve_state =
		kzalloc(sve_state_size(task), GFP_KERNEL);

	/*
	 * If future SVE revisions can have larger vectors though,
	 * this may cease to be true:
	 */
	BUG_ON(!task->thread.sve_state);
}
/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace. task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void fpsimd_sync_to_sve(struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SVE))
		fpsimd_to_sve(task);
}

/*
 * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace. task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void sve_sync_to_fpsimd(struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);
}
/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the task->thread.uw.fpsimd_state.
 *
 * This should only be called by ptrace to merge new FPSIMD register
 * values into a task for which SVE is currently active.
 * task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must already have been initialised with
 * the new FPSIMD register values to be merged in.
 */
void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;
	__uint128_t *p;

	if (!test_tsk_thread_flag(task, TIF_SVE))
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);

	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
	for (i = 0; i < 32; ++i) {
		p = (__uint128_t *)ZREG(sst, vq, i);
		*p = arm64_cpu_to_le128(fst->vregs[i]);
	}
}
int sve_set_vector_length(struct task_struct *task,
			  unsigned long vl, unsigned long flags)
{
	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
				     PR_SVE_SET_VL_ONEXEC))
		return -EINVAL;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	/*
	 * Clamp to the maximum vector length that VL-agnostic SVE code can
	 * work with. A flag may be assigned in the future to allow setting
	 * of larger vector lengths without confusing older software.
	 */
	if (vl > SVE_VL_ARCH_MAX)
		vl = SVE_VL_ARCH_MAX;

	vl = find_supported_vector_length(vl);

	if (flags & (PR_SVE_VL_INHERIT |
		     PR_SVE_SET_VL_ONEXEC))
		task->thread.sve_vl_onexec = vl;
	else
		/* Reset VL to system default on next exec: */
		task->thread.sve_vl_onexec = 0;

	/* Only actually set the VL if not deferred: */
	if (flags & PR_SVE_SET_VL_ONEXEC)
		goto out;

	if (vl == task->thread.sve_vl)
		goto out;

	/*
	 * To ensure the FPSIMD bits of the SVE vector registers are preserved,
	 * write any live register state back to task_struct, and convert to a
	 * non-SVE thread before returning.
	 */
	if (task == current) {
		local_bh_disable();

		fpsimd_save();
	}

	fpsimd_flush_task_state(task);
	if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);

	if (task == current)
		local_bh_enable();

	/*
	 * Force reallocation of task SVE state to the correct size
	 * on next use:
	 */
	sve_free(task);

	task->thread.sve_vl = vl;

out:
	update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT,
			       flags & PR_SVE_VL_INHERIT);

	return 0;
}
/*
 * Encode the current vector length and flags for return.
 * This is only required for prctl(): ptrace has separate fields.
 *
 * flags are as for sve_set_vector_length().
 */
static int sve_prctl_status(unsigned long flags)
{
	int ret;

	if (flags & PR_SVE_SET_VL_ONEXEC)
		ret = current->thread.sve_vl_onexec;
	else
		ret = current->thread.sve_vl;

	if (test_thread_flag(TIF_SVE_VL_INHERIT))
		ret |= PR_SVE_VL_INHERIT;

	return ret;
}

/* PR_SVE_SET_VL */
int sve_set_current_vl(unsigned long arg)
{
	unsigned long vl, flags;
	int ret;

	vl = arg & PR_SVE_VL_LEN_MASK;
	flags = arg & ~vl;

	if (!system_supports_sve())
		return -EINVAL;

	ret = sve_set_vector_length(current, vl, flags);
	if (ret)
		return ret;

	return sve_prctl_status(flags);
}

/* PR_SVE_GET_VL */
int sve_get_current_vl(void)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_prctl_status(0);
}
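/*
 * Usage sketch (illustrative, not part of this file): userspace reaches
 * the two functions above via prctl(), e.g.:
 *
 *	int ret = prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT);
 *
 *	if (ret == -1)
 *		perror("PR_SVE_SET_VL");
 *	else
 *		printf("granted VL: %d\n", ret & PR_SVE_VL_LEN_MASK);
 */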
static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
{
	unsigned int vq, vl;
	unsigned long zcr;

	bitmap_zero(map, SVE_VQ_MAX);

	zcr = ZCR_ELx_LEN_MASK;
	zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr;

	for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
		write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
		vl = sve_get_vl();
		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
		set_bit(__vq_to_bit(vq), map);
	}
}
/*
 * Initialise the set of known supported VQs for the boot CPU.
 * This is called during kernel boot, before secondary CPUs are brought up.
 */
void __init sve_init_vq_map(void)
{
	sve_probe_vqs(sve_vq_map);
	bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX);
}

/*
 * If we haven't committed to the set of supported VQs yet, filter out
 * those not supported by the current CPU.
 * This function is called during the bring-up of early secondary CPUs only.
 */
void sve_update_vq_map(void)
{
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);

	sve_probe_vqs(tmp_map);
	bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX);
	bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX);
}
/*
 * Check whether the current CPU supports all VQs in the committed set.
 * This function is called during the bring-up of late secondary CPUs only.
 */
int sve_verify_vq_map(void)
{
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;

	sve_probe_vqs(tmp_map);

	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
	if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) {
		pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
			smp_processor_id());
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
		return 0;

	/*
	 * For KVM, it is necessary to ensure that this CPU doesn't
	 * support any vector length that guests may have probed as
	 * unsupported.
	 */

	/* Recover the set of supported VQs: */
	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
	/* Find VQs supported that are not globally supported: */
	bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX);

	/* Find the lowest such VQ, if any: */
	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		return 0; /* no mismatches */

	/*
	 * Mismatches above sve_max_virtualisable_vl are fine, since
	 * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
	 */
	if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
		pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
			smp_processor_id());
		return -EINVAL;
	}

	return 0;
}
static void __init sve_efi_setup(void)
{
	if (!IS_ENABLED(CONFIG_EFI))
		return;

	/*
	 * alloc_percpu() warns and prints a backtrace if this goes wrong.
	 * This is evidence of a crippled system and we are returning void,
	 * so no attempt is made to handle this situation here.
	 */
	if (!sve_vl_valid(sve_max_vl))
		goto fail;

	efi_sve_state = __alloc_percpu(
		SVE_SIG_REGS_SIZE(sve_vq_from_vl(sve_max_vl)), SVE_VQ_BYTES);
	if (!efi_sve_state)
		goto fail;

	return;

fail:
	panic("Cannot allocate percpu memory for EFI SVE save/restore");
}
/*
 * Enable SVE for EL1.
 * Intended for use by the cpufeatures code during CPU boot.
 */
void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
{
	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
	isb();
}
/*
 * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
 * vector length.
 *
 * Use only if SVE is present.
 * This function clobbers the SVE vector length.
 */
u64 read_zcr_features(void)
{
	u64 zcr;
	unsigned int vq_max;

	/*
	 * Set the maximum possible VL, and write zeroes to all other
	 * bits to see if they stick.
	 */
	sve_kernel_enable(NULL);
	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);

	zcr = read_sysreg_s(SYS_ZCR_EL1);
	zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
	vq_max = sve_vq_from_vl(sve_get_vl());
	zcr |= vq_max - 1; /* set LEN field to maximum effective value */

	return zcr;
}
void __init sve_setup(void)
{
	u64 zcr;
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;

	if (!system_supports_sve())
		return;

	/*
	 * The SVE architecture mandates support for 128-bit vectors,
	 * so sve_vq_map must have at least SVE_VQ_MIN set.
	 * If something went wrong, at least try to patch it up:
	 */
	if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
		set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);

	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
	sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);

	/*
	 * Sanity-check that the max VL we determined through CPU features
	 * corresponds properly to sve_vq_map. If not, do our best:
	 */
	if (WARN_ON(sve_max_vl != find_supported_vector_length(sve_max_vl)))
		sve_max_vl = find_supported_vector_length(sve_max_vl);

	/*
	 * For the default VL, pick the maximum supported value <= 64.
	 * VL == 64 is guaranteed not to grow the signal frame.
	 */
	sve_default_vl = find_supported_vector_length(64);

	bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
		      SVE_VQ_MAX);

	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		/* No non-virtualisable VLs found */
		sve_max_virtualisable_vl = SVE_VQ_MAX;
	else if (WARN_ON(b == SVE_VQ_MAX - 1))
		/* No virtualisable VLs? This is architecturally forbidden. */
		sve_max_virtualisable_vl = SVE_VQ_MIN;
	else /* b + 1 < SVE_VQ_MAX */
		sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));

	if (sve_max_virtualisable_vl > sve_max_vl)
		sve_max_virtualisable_vl = sve_max_vl;

	pr_info("SVE: maximum available vector length %u bytes per vector\n",
		sve_max_vl);
	pr_info("SVE: default vector length %u bytes per vector\n",
		sve_default_vl);

	/* KVM decides whether to support mismatched systems. Just warn here: */
	if (sve_max_virtualisable_vl < sve_max_vl)
		pr_warn("SVE: unvirtualisable vector lengths present\n");

	sve_efi_setup();
}
/*
 * Called from the put_task_struct() path, which cannot get here
 * unless dead_task is really dead and not schedulable.
 */
void fpsimd_release_task(struct task_struct *dead_task)
{
	__sve_free(dead_task);
}

#endif /* CONFIG_ARM64_SVE */
/*
 * Trapped SVE access
 *
 * Storage is allocated for the full SVE state, the current FPSIMD
 * register contents are migrated across, and TIF_SVE is set so that
 * the SVE access trap will be disabled the next time this task
 * reaches ret_to_user.
 *
 * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load()
 * would have disabled the SVE access trap for userspace during
 * ret_to_user, making an SVE access trap impossible in that case.
 */
asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{
	/* Even if we chose not to use SVE, the hardware could still trap: */
	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
		return;
	}

	sve_alloc(current);

	local_bh_disable();

	fpsimd_save();

	/* Force ret_to_user to reload the registers: */
	fpsimd_flush_task_state(current);

	fpsimd_to_sve(current);
	if (test_and_set_thread_flag(TIF_SVE))
		WARN_ON(1); /* SVE access shouldn't have trapped */

	local_bh_enable();
}
/*
 * Trapped FP/ASIMD access.
 */
asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
	/* TODO: implement lazy context saving/restoring */
	WARN_ON(1);
}
/*
 * Raise a SIGFPE for the current process.
 */
asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
	unsigned int si_code = FPE_FLTUNK;

	if (esr & ESR_ELx_FP_EXC_TFV) {
		if (esr & FPEXC_IOF)
			si_code = FPE_FLTINV;
		else if (esr & FPEXC_DZF)
			si_code = FPE_FLTDIV;
		else if (esr & FPEXC_OFF)
			si_code = FPE_FLTOVF;
		else if (esr & FPEXC_UFF)
			si_code = FPE_FLTUND;
		else if (esr & FPEXC_IXF)
			si_code = FPE_FLTRES;
	}

	send_sig_fault(SIGFPE, si_code,
		       (void __user *)instruction_pointer(regs),
		       current);
}
void fpsimd_thread_switch(struct task_struct *next)
{
	bool wrong_task, wrong_cpu;

	if (!system_supports_fpsimd())
		return;

	/* Save unsaved fpsimd state, if any: */
	fpsimd_save();

	/*
	 * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
	 * state. For kernel threads, FPSIMD registers are never loaded
	 * and wrong_task and wrong_cpu will always be true.
	 */
	wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
					&next->thread.uw.fpsimd_state;
	wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();

	update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
			       wrong_task || wrong_cpu);
}
void fpsimd_flush_thread(void)
{
	int vl, supported_vl;

	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	fpsimd_flush_task_state(current);
	memset(&current->thread.uw.fpsimd_state, 0,
	       sizeof(current->thread.uw.fpsimd_state));

	if (system_supports_sve()) {
		clear_thread_flag(TIF_SVE);
		sve_free(current);

		/*
		 * Reset the task vector length as required.
		 * This is where we ensure that all user tasks have a valid
		 * vector length configured: no kernel task can become a user
		 * task without an exec and hence a call to this function.
		 * By the time the first call to this function is made, all
		 * early hardware probing is complete, so sve_default_vl
		 * should be valid.
		 * If a bug causes this to go wrong, we make some noise and
		 * try to fudge thread.sve_vl to a safe value here.
		 */
		vl = current->thread.sve_vl_onexec ?
			current->thread.sve_vl_onexec : sve_default_vl;

		if (WARN_ON(!sve_vl_valid(vl)))
			vl = SVE_VL_MIN;

		supported_vl = find_supported_vector_length(vl);
		if (WARN_ON(supported_vl != vl))
			vl = supported_vl;

		current->thread.sve_vl = vl;

		/*
		 * If the task is not set to inherit, ensure that the vector
		 * length will be reset by a subsequent exec:
		 */
		if (!test_thread_flag(TIF_SVE_VL_INHERIT))
			current->thread.sve_vl_onexec = 0;
	}

	local_bh_enable();
}
/*
 * Save the userland FPSIMD state of 'current' to memory, but only if the state
 * currently held in the registers does in fact belong to 'current'
 */
void fpsimd_preserve_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();
	fpsimd_save();
	local_bh_enable();
}

/*
 * Like fpsimd_preserve_current_state(), but ensure that
 * current->thread.uw.fpsimd_state is updated so that it can be copied to
 * the signal frame.
 */
void fpsimd_signal_preserve_current_state(void)
{
	fpsimd_preserve_current_state();
	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		sve_to_fpsimd(current);
}
/*
 * Associate current's FPSIMD context with this cpu
 * Preemption must be disabled when calling this function.
 */
void fpsimd_bind_task_to_cpu(void)
{
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);

	last->st = &current->thread.uw.fpsimd_state;
	last->sve_state = current->thread.sve_state;
	last->sve_vl = current->thread.sve_vl;
	current->thread.fpsimd_cpu = smp_processor_id();

	if (system_supports_sve()) {
		/* Toggle SVE trapping for userspace if needed */
		if (test_thread_flag(TIF_SVE))
			sve_user_enable();
		else
			sve_user_disable();

		/* Serialised by exception return to user */
	}
}
void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
			      unsigned int sve_vl)
{
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);

	WARN_ON(!in_softirq() && !irqs_disabled());

	last->st = st;
	last->sve_state = sve_state;
	last->sve_vl = sve_vl;
}
/*
 * Load the userland FPSIMD state of 'current' from memory, but only if the
 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
 * state of 'current'
 */
void fpsimd_restore_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		task_fpsimd_load();
		fpsimd_bind_task_to_cpu();
	}

	local_bh_enable();
}
/*
 * Load an updated userland FPSIMD state for 'current' from memory and set the
 * flag that indicates that the FPSIMD register contents are the most recent
 * FPSIMD state of 'current'
 */
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	current->thread.uw.fpsimd_state = *state;
	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		fpsimd_to_sve(current);

	task_fpsimd_load();
	fpsimd_bind_task_to_cpu();

	clear_thread_flag(TIF_FOREIGN_FPSTATE);

	local_bh_enable();
}
/*
 * Invalidate live CPU copies of task t's FPSIMD state
 *
 * This function may be called with preemption enabled. The barrier()
 * ensures that the assignment to fpsimd_cpu is visible to any
 * preemption/softirq that could race with set_tsk_thread_flag(), so
 * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
 *
 * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
 * subsequent code.
 */
void fpsimd_flush_task_state(struct task_struct *t)
{
	t->thread.fpsimd_cpu = NR_CPUS;

	barrier();
	set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);

	barrier();
}
/*
 * Invalidate any task's FPSIMD state that is present on this cpu.
 * This function must be called with softirqs disabled.
 */
void fpsimd_flush_cpu_state(void)
{
	__this_cpu_write(fpsimd_last_state.st, NULL);
	set_thread_flag(TIF_FOREIGN_FPSTATE);
}
#ifdef CONFIG_KERNEL_MODE_NEON

DEFINE_PER_CPU(bool, kernel_neon_busy);
EXPORT_PER_CPU_SYMBOL(kernel_neon_busy);

/*
 * Kernel-side NEON support functions
 */

/*
 * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
 * context
 *
 * Must not be called unless may_use_simd() returns true.
 * Task context in the FPSIMD registers is saved back to memory as necessary.
 *
 * A matching call to kernel_neon_end() must be made before returning from the
 * calling context.
 *
 * The caller may freely use the FPSIMD registers until kernel_neon_end() is
 * called.
 */
void kernel_neon_begin(void)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	BUG_ON(!may_use_simd());

	local_bh_disable();

	__this_cpu_write(kernel_neon_busy, true);

	/* Save unsaved fpsimd state, if any: */
	fpsimd_save();

	/* Invalidate any task state remaining in the fpsimd regs: */
	fpsimd_flush_cpu_state();

	preempt_disable();

	local_bh_enable();
}
EXPORT_SYMBOL(kernel_neon_begin);

/*
 * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
 *
 * Must be called from a context in which kernel_neon_begin() was previously
 * called, with no call to kernel_neon_end() in the meantime.
 *
 * The caller must not use the FPSIMD registers after this function is called,
 * unless kernel_neon_begin() is called again in the meantime.
 */
void kernel_neon_end(void)
{
	bool busy;

	if (!system_supports_fpsimd())
		return;

	busy = __this_cpu_xchg(kernel_neon_busy, false);
	WARN_ON(!busy);	/* No matching kernel_neon_begin()? */

	preempt_enable();
}
EXPORT_SYMBOL(kernel_neon_end);
#ifdef CONFIG_EFI

static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
static DEFINE_PER_CPU(bool, efi_sve_state_used);

/*
 * EFI runtime services support functions
 *
 * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
 * This means that for EFI (and only for EFI), we have to assume that FPSIMD
 * is always used rather than being an optional accelerator.
 *
 * These functions provide the necessary support for ensuring FPSIMD
 * save/restore in the contexts from which EFI is used.
 *
 * Do not use them for any other purpose -- if tempted to do so, you are
 * either doing something wrong or you need to propose some refactoring.
 */
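/*
 * Sketch of the expected bracketing of an EFI runtime services call
 * (illustrative; efi_rts_call() stands in for the real dispatch code):
 *
 *	__efi_fpsimd_begin();
 *	status = efi_rts_call(...);
 *	__efi_fpsimd_end();
 */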
/*
 * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
 */
void __efi_fpsimd_begin(void)
{
	if (!system_supports_fpsimd())
		return;

	WARN_ON(preemptible());

	if (may_use_simd()) {
		kernel_neon_begin();
	} else {
		/*
		 * If !efi_sve_state, SVE can't be in use yet and doesn't need
		 * preserving:
		 */
		if (system_supports_sve() && likely(efi_sve_state)) {
			char *sve_state = this_cpu_ptr(efi_sve_state);

			__this_cpu_write(efi_sve_state_used, true);

			sve_save_state(sve_state + sve_ffr_offset(sve_max_vl),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr);
		} else {
			fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
		}

		__this_cpu_write(efi_fpsimd_state_used, true);
	}
}

/*
 * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
 */
void __efi_fpsimd_end(void)
{
	if (!system_supports_fpsimd())
		return;

	if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
		kernel_neon_end();
	} else {
		if (system_supports_sve() &&
		    likely(__this_cpu_read(efi_sve_state_used))) {
			char const *sve_state = this_cpu_ptr(efi_sve_state);

			sve_load_state(sve_state + sve_ffr_offset(sve_max_vl),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
				       sve_vq_from_vl(sve_get_vl()) - 1);

			__this_cpu_write(efi_sve_state_used, false);
		} else {
			fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
		}
	}
}

#endif /* CONFIG_EFI */
#endif /* CONFIG_KERNEL_MODE_NEON */

#ifdef CONFIG_CPU_PM
static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
				  unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		fpsimd_save();
		fpsimd_flush_cpu_state();
		break;
	case CPU_PM_EXIT:
		break;
	case CPU_PM_ENTER_FAILED:
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block fpsimd_cpu_pm_notifier_block = {
	.notifier_call = fpsimd_cpu_pm_notifier,
};

static void __init fpsimd_pm_init(void)
{
	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}

#else
static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
#ifdef CONFIG_HOTPLUG_CPU
static int fpsimd_cpu_dead(unsigned int cpu)
{
	per_cpu(fpsimd_last_state.st, cpu) = NULL;
	return 0;
}

static inline void fpsimd_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
				  NULL, fpsimd_cpu_dead);
}

#else
static inline void fpsimd_hotplug_init(void) { }
#endif
/*
 * FP/SIMD support code initialisation.
 */
static int __init fpsimd_init(void)
{
	if (cpu_have_named_feature(FP)) {
		fpsimd_pm_init();
		fpsimd_hotplug_init();
	} else {
		pr_notice("Floating-point is not implemented\n");
	}

	if (!cpu_have_named_feature(ASIMD))
		pr_notice("Advanced SIMD is not implemented\n");

	return sve_sysctl_init();
}
core_initcall(fpsimd_init);