]>
Commit | Line | Data |
---|---|---|
caab277b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
2c020ed8 CM |
2 | /* |
3 | * Based on arch/arm/kernel/signal.c | |
4 | * | |
5 | * Copyright (C) 1995-2009 Russell King | |
6 | * Copyright (C) 2012 ARM Ltd. | |
2c020ed8 CM |
7 | */ |
8 | ||
94b07c1f | 9 | #include <linux/cache.h> |
fd92d4a5 | 10 | #include <linux/compat.h> |
2c020ed8 | 11 | #include <linux/errno.h> |
20987de3 | 12 | #include <linux/kernel.h> |
2c020ed8 CM |
13 | #include <linux/signal.h> |
14 | #include <linux/personality.h> | |
15 | #include <linux/freezer.h> | |
47ccb028 | 16 | #include <linux/stddef.h> |
2c020ed8 | 17 | #include <linux/uaccess.h> |
33f08261 | 18 | #include <linux/sizes.h> |
bb4891a6 | 19 | #include <linux/string.h> |
2c020ed8 CM |
20 | #include <linux/tracehook.h> |
21 | #include <linux/ratelimit.h> | |
cf7de27a | 22 | #include <linux/syscalls.h> |
2c020ed8 | 23 | |
8d66772e | 24 | #include <asm/daifflags.h> |
2c020ed8 CM |
25 | #include <asm/debug-monitors.h> |
26 | #include <asm/elf.h> | |
27 | #include <asm/cacheflush.h> | |
28 | #include <asm/ucontext.h> | |
29 | #include <asm/unistd.h> | |
30 | #include <asm/fpsimd.h> | |
17c28958 | 31 | #include <asm/ptrace.h> |
2c020ed8 | 32 | #include <asm/signal32.h> |
f71016a8 | 33 | #include <asm/traps.h> |
2c020ed8 CM |
34 | #include <asm/vdso.h> |
35 | ||
36 | /* | |
37 | * Do a signal return; undo the signal stack. These are aligned to 128-bit. | |
38 | */ | |
39 | struct rt_sigframe { | |
40 | struct siginfo info; | |
41 | struct ucontext uc; | |
20987de3 DM |
42 | }; |
43 | ||
44 | struct frame_record { | |
304ef4e8 WD |
45 | u64 fp; |
46 | u64 lr; | |
2c020ed8 CM |
47 | }; |
48 | ||
20987de3 DM |
49 | struct rt_sigframe_user_layout { |
50 | struct rt_sigframe __user *sigframe; | |
51 | struct frame_record __user *next_frame; | |
bb4891a6 DM |
52 | |
53 | unsigned long size; /* size of allocated sigframe data */ | |
54 | unsigned long limit; /* largest allowed size */ | |
55 | ||
56 | unsigned long fpsimd_offset; | |
57 | unsigned long esr_offset; | |
8cd969d2 | 58 | unsigned long sve_offset; |
33f08261 | 59 | unsigned long extra_offset; |
bb4891a6 | 60 | unsigned long end_offset; |
20987de3 DM |
61 | }; |
62 | ||
33f08261 DM |
/* All signal frame records are 16-byte aligned, so round sizes up likewise: */
#define BASE_SIGFRAME_SIZE	round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE		round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE	round_up(sizeof(struct extra_context), 16)
66 | ||
bb4891a6 DM |
67 | static void init_user_layout(struct rt_sigframe_user_layout *user) |
68 | { | |
33f08261 DM |
69 | const size_t reserved_size = |
70 | sizeof(user->sigframe->uc.uc_mcontext.__reserved); | |
71 | ||
bb4891a6 DM |
72 | memset(user, 0, sizeof(*user)); |
73 | user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved); | |
74 | ||
33f08261 DM |
75 | user->limit = user->size + reserved_size; |
76 | ||
77 | user->limit -= TERMINATOR_SIZE; | |
78 | user->limit -= EXTRA_CONTEXT_SIZE; | |
79 | /* Reserve space for extension and terminator ^ */ | |
bb4891a6 DM |
80 | } |
81 | ||
82 | static size_t sigframe_size(struct rt_sigframe_user_layout const *user) | |
83 | { | |
84 | return round_up(max(user->size, sizeof(struct rt_sigframe)), 16); | |
85 | } | |
86 | ||
33f08261 DM |
87 | /* |
88 | * Sanity limit on the approximate maximum size of signal frame we'll | |
89 | * try to generate. Stack alignment padding and the frame record are | |
90 | * not taken into account. This limit is not a guarantee and is | |
91 | * NOT ABI. | |
92 | */ | |
93 | #define SIGFRAME_MAXSZ SZ_64K | |
94 | ||
95 | static int __sigframe_alloc(struct rt_sigframe_user_layout *user, | |
96 | unsigned long *offset, size_t size, bool extend) | |
97 | { | |
98 | size_t padded_size = round_up(size, 16); | |
99 | ||
100 | if (padded_size > user->limit - user->size && | |
101 | !user->extra_offset && | |
102 | extend) { | |
103 | int ret; | |
104 | ||
105 | user->limit += EXTRA_CONTEXT_SIZE; | |
106 | ret = __sigframe_alloc(user, &user->extra_offset, | |
107 | sizeof(struct extra_context), false); | |
108 | if (ret) { | |
109 | user->limit -= EXTRA_CONTEXT_SIZE; | |
110 | return ret; | |
111 | } | |
112 | ||
113 | /* Reserve space for the __reserved[] terminator */ | |
114 | user->size += TERMINATOR_SIZE; | |
115 | ||
116 | /* | |
117 | * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for | |
118 | * the terminator: | |
119 | */ | |
120 | user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE; | |
121 | } | |
122 | ||
123 | /* Still not enough space? Bad luck! */ | |
124 | if (padded_size > user->limit - user->size) | |
125 | return -ENOMEM; | |
126 | ||
127 | *offset = user->size; | |
128 | user->size += padded_size; | |
129 | ||
130 | return 0; | |
131 | } | |
132 | ||
bb4322f7 DM |
133 | /* |
134 | * Allocate space for an optional record of <size> bytes in the user | |
135 | * signal frame. The offset from the signal frame base address to the | |
136 | * allocated block is assigned to *offset. | |
137 | */ | |
138 | static int sigframe_alloc(struct rt_sigframe_user_layout *user, | |
139 | unsigned long *offset, size_t size) | |
140 | { | |
33f08261 DM |
141 | return __sigframe_alloc(user, offset, size, true); |
142 | } | |
bb4322f7 | 143 | |
33f08261 DM |
144 | /* Allocate the null terminator record and prevent further allocations */ |
145 | static int sigframe_alloc_end(struct rt_sigframe_user_layout *user) | |
146 | { | |
147 | int ret; | |
bb4322f7 | 148 | |
33f08261 DM |
149 | /* Un-reserve the space reserved for the terminator: */ |
150 | user->limit += TERMINATOR_SIZE; | |
151 | ||
152 | ret = sigframe_alloc(user, &user->end_offset, | |
153 | sizeof(struct _aarch64_ctx)); | |
154 | if (ret) | |
155 | return ret; | |
156 | ||
157 | /* Prevent further allocation: */ | |
158 | user->limit = user->size; | |
bb4322f7 DM |
159 | return 0; |
160 | } | |
161 | ||
bb4891a6 DM |
162 | static void __user *apply_user_offset( |
163 | struct rt_sigframe_user_layout const *user, unsigned long offset) | |
164 | { | |
165 | char __user *base = (char __user *)user->sigframe; | |
166 | ||
167 | return base + offset; | |
168 | } | |
169 | ||
2c020ed8 CM |
170 | static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) |
171 | { | |
65896545 DM |
172 | struct user_fpsimd_state const *fpsimd = |
173 | ¤t->thread.uw.fpsimd_state; | |
2c020ed8 CM |
174 | int err; |
175 | ||
2c020ed8 CM |
176 | /* copy the FP and status/control registers */ |
177 | err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs)); | |
178 | __put_user_error(fpsimd->fpsr, &ctx->fpsr, err); | |
179 | __put_user_error(fpsimd->fpcr, &ctx->fpcr, err); | |
180 | ||
181 | /* copy the magic/size information */ | |
182 | __put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err); | |
183 | __put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err); | |
184 | ||
185 | return err ? -EFAULT : 0; | |
186 | } | |
187 | ||
188 | static int restore_fpsimd_context(struct fpsimd_context __user *ctx) | |
189 | { | |
0abdeff5 | 190 | struct user_fpsimd_state fpsimd; |
2c020ed8 CM |
191 | __u32 magic, size; |
192 | int err = 0; | |
193 | ||
194 | /* check the magic/size information */ | |
195 | __get_user_error(magic, &ctx->head.magic, err); | |
196 | __get_user_error(size, &ctx->head.size, err); | |
197 | if (err) | |
198 | return -EFAULT; | |
199 | if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context)) | |
200 | return -EINVAL; | |
201 | ||
202 | /* copy the FP and status/control registers */ | |
203 | err = __copy_from_user(fpsimd.vregs, ctx->vregs, | |
204 | sizeof(fpsimd.vregs)); | |
205 | __get_user_error(fpsimd.fpsr, &ctx->fpsr, err); | |
206 | __get_user_error(fpsimd.fpcr, &ctx->fpcr, err); | |
207 | ||
8cd969d2 DM |
208 | clear_thread_flag(TIF_SVE); |
209 | ||
2c020ed8 | 210 | /* load the hardware registers from the fpsimd_state structure */ |
c51f9269 AB |
211 | if (!err) |
212 | fpsimd_update_current_state(&fpsimd); | |
2c020ed8 CM |
213 | |
214 | return err ? -EFAULT : 0; | |
215 | } | |
216 | ||
8cd969d2 | 217 | |
47ccb028 DM |
218 | struct user_ctxs { |
219 | struct fpsimd_context __user *fpsimd; | |
8cd969d2 | 220 | struct sve_context __user *sve; |
47ccb028 DM |
221 | }; |
222 | ||
8cd969d2 DM |
223 | #ifdef CONFIG_ARM64_SVE |
224 | ||
225 | static int preserve_sve_context(struct sve_context __user *ctx) | |
226 | { | |
227 | int err = 0; | |
228 | u16 reserved[ARRAY_SIZE(ctx->__reserved)]; | |
229 | unsigned int vl = current->thread.sve_vl; | |
230 | unsigned int vq = 0; | |
231 | ||
232 | if (test_thread_flag(TIF_SVE)) | |
233 | vq = sve_vq_from_vl(vl); | |
234 | ||
235 | memset(reserved, 0, sizeof(reserved)); | |
236 | ||
237 | __put_user_error(SVE_MAGIC, &ctx->head.magic, err); | |
238 | __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16), | |
239 | &ctx->head.size, err); | |
240 | __put_user_error(vl, &ctx->vl, err); | |
241 | BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved)); | |
242 | err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved)); | |
243 | ||
244 | if (vq) { | |
245 | /* | |
246 | * This assumes that the SVE state has already been saved to | |
247 | * the task struct by calling preserve_fpsimd_context(). | |
248 | */ | |
249 | err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET, | |
250 | current->thread.sve_state, | |
251 | SVE_SIG_REGS_SIZE(vq)); | |
252 | } | |
253 | ||
254 | return err ? -EFAULT : 0; | |
255 | } | |
256 | ||
257 | static int restore_sve_fpsimd_context(struct user_ctxs *user) | |
258 | { | |
259 | int err; | |
260 | unsigned int vq; | |
0abdeff5 | 261 | struct user_fpsimd_state fpsimd; |
8cd969d2 DM |
262 | struct sve_context sve; |
263 | ||
264 | if (__copy_from_user(&sve, user->sve, sizeof(sve))) | |
265 | return -EFAULT; | |
266 | ||
267 | if (sve.vl != current->thread.sve_vl) | |
268 | return -EINVAL; | |
269 | ||
270 | if (sve.head.size <= sizeof(*user->sve)) { | |
271 | clear_thread_flag(TIF_SVE); | |
272 | goto fpsimd_only; | |
273 | } | |
274 | ||
275 | vq = sve_vq_from_vl(sve.vl); | |
276 | ||
277 | if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq)) | |
278 | return -EINVAL; | |
279 | ||
280 | /* | |
281 | * Careful: we are about __copy_from_user() directly into | |
282 | * thread.sve_state with preemption enabled, so protection is | |
283 | * needed to prevent a racing context switch from writing stale | |
284 | * registers back over the new data. | |
285 | */ | |
286 | ||
287 | fpsimd_flush_task_state(current); | |
8cd969d2 DM |
288 | /* From now, fpsimd_thread_switch() won't touch thread.sve_state */ |
289 | ||
290 | sve_alloc(current); | |
291 | err = __copy_from_user(current->thread.sve_state, | |
292 | (char __user const *)user->sve + | |
293 | SVE_SIG_REGS_OFFSET, | |
294 | SVE_SIG_REGS_SIZE(vq)); | |
295 | if (err) | |
296 | return -EFAULT; | |
297 | ||
298 | set_thread_flag(TIF_SVE); | |
299 | ||
300 | fpsimd_only: | |
301 | /* copy the FP and status/control registers */ | |
302 | /* restore_sigframe() already checked that user->fpsimd != NULL. */ | |
303 | err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs, | |
304 | sizeof(fpsimd.vregs)); | |
305 | __get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err); | |
306 | __get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err); | |
307 | ||
308 | /* load the hardware registers from the fpsimd_state structure */ | |
309 | if (!err) | |
310 | fpsimd_update_current_state(&fpsimd); | |
311 | ||
312 | return err ? -EFAULT : 0; | |
313 | } | |
314 | ||
315 | #else /* ! CONFIG_ARM64_SVE */ | |
316 | ||
317 | /* Turn any non-optimised out attempts to use these into a link error: */ | |
318 | extern int preserve_sve_context(void __user *ctx); | |
319 | extern int restore_sve_fpsimd_context(struct user_ctxs *user); | |
320 | ||
321 | #endif /* ! CONFIG_ARM64_SVE */ | |
322 | ||
323 | ||
47ccb028 DM |
324 | static int parse_user_sigframe(struct user_ctxs *user, |
325 | struct rt_sigframe __user *sf) | |
326 | { | |
327 | struct sigcontext __user *const sc = &sf->uc.uc_mcontext; | |
bb4891a6 DM |
328 | struct _aarch64_ctx __user *head; |
329 | char __user *base = (char __user *)&sc->__reserved; | |
47ccb028 | 330 | size_t offset = 0; |
bb4891a6 | 331 | size_t limit = sizeof(sc->__reserved); |
33f08261 DM |
332 | bool have_extra_context = false; |
333 | char const __user *const sfp = (char const __user *)sf; | |
47ccb028 DM |
334 | |
335 | user->fpsimd = NULL; | |
8cd969d2 | 336 | user->sve = NULL; |
47ccb028 | 337 | |
bb4891a6 DM |
338 | if (!IS_ALIGNED((unsigned long)base, 16)) |
339 | goto invalid; | |
340 | ||
47ccb028 | 341 | while (1) { |
bb4891a6 | 342 | int err = 0; |
47ccb028 | 343 | u32 magic, size; |
33f08261 DM |
344 | char const __user *userp; |
345 | struct extra_context const __user *extra; | |
346 | u64 extra_datap; | |
347 | u32 extra_size; | |
348 | struct _aarch64_ctx const __user *end; | |
349 | u32 end_magic, end_size; | |
47ccb028 | 350 | |
bb4891a6 | 351 | if (limit - offset < sizeof(*head)) |
47ccb028 DM |
352 | goto invalid; |
353 | ||
bb4891a6 DM |
354 | if (!IS_ALIGNED(offset, 16)) |
355 | goto invalid; | |
356 | ||
357 | head = (struct _aarch64_ctx __user *)(base + offset); | |
47ccb028 DM |
358 | __get_user_error(magic, &head->magic, err); |
359 | __get_user_error(size, &head->size, err); | |
360 | if (err) | |
361 | return err; | |
362 | ||
bb4891a6 DM |
363 | if (limit - offset < size) |
364 | goto invalid; | |
365 | ||
47ccb028 DM |
366 | switch (magic) { |
367 | case 0: | |
368 | if (size) | |
369 | goto invalid; | |
370 | ||
371 | goto done; | |
372 | ||
373 | case FPSIMD_MAGIC: | |
374 | if (user->fpsimd) | |
375 | goto invalid; | |
376 | ||
bb4891a6 | 377 | if (size < sizeof(*user->fpsimd)) |
47ccb028 DM |
378 | goto invalid; |
379 | ||
380 | user->fpsimd = (struct fpsimd_context __user *)head; | |
381 | break; | |
382 | ||
383 | case ESR_MAGIC: | |
384 | /* ignore */ | |
385 | break; | |
386 | ||
8cd969d2 DM |
387 | case SVE_MAGIC: |
388 | if (!system_supports_sve()) | |
389 | goto invalid; | |
390 | ||
391 | if (user->sve) | |
392 | goto invalid; | |
393 | ||
394 | if (size < sizeof(*user->sve)) | |
395 | goto invalid; | |
396 | ||
397 | user->sve = (struct sve_context __user *)head; | |
398 | break; | |
399 | ||
33f08261 DM |
400 | case EXTRA_MAGIC: |
401 | if (have_extra_context) | |
402 | goto invalid; | |
403 | ||
404 | if (size < sizeof(*extra)) | |
405 | goto invalid; | |
406 | ||
407 | userp = (char const __user *)head; | |
408 | ||
409 | extra = (struct extra_context const __user *)userp; | |
410 | userp += size; | |
411 | ||
412 | __get_user_error(extra_datap, &extra->datap, err); | |
413 | __get_user_error(extra_size, &extra->size, err); | |
414 | if (err) | |
415 | return err; | |
416 | ||
417 | /* Check for the dummy terminator in __reserved[]: */ | |
418 | ||
419 | if (limit - offset - size < TERMINATOR_SIZE) | |
420 | goto invalid; | |
421 | ||
422 | end = (struct _aarch64_ctx const __user *)userp; | |
423 | userp += TERMINATOR_SIZE; | |
424 | ||
425 | __get_user_error(end_magic, &end->magic, err); | |
426 | __get_user_error(end_size, &end->size, err); | |
427 | if (err) | |
428 | return err; | |
429 | ||
430 | if (end_magic || end_size) | |
431 | goto invalid; | |
432 | ||
433 | /* Prevent looping/repeated parsing of extra_context */ | |
434 | have_extra_context = true; | |
435 | ||
436 | base = (__force void __user *)extra_datap; | |
437 | if (!IS_ALIGNED((unsigned long)base, 16)) | |
438 | goto invalid; | |
439 | ||
440 | if (!IS_ALIGNED(extra_size, 16)) | |
441 | goto invalid; | |
442 | ||
443 | if (base != userp) | |
444 | goto invalid; | |
445 | ||
446 | /* Reject "unreasonably large" frames: */ | |
447 | if (extra_size > sfp + SIGFRAME_MAXSZ - userp) | |
448 | goto invalid; | |
449 | ||
450 | /* | |
451 | * Ignore trailing terminator in __reserved[] | |
452 | * and start parsing extra data: | |
453 | */ | |
454 | offset = 0; | |
455 | limit = extra_size; | |
abf73988 | 456 | |
96d4f267 | 457 | if (!access_ok(base, limit)) |
abf73988 DM |
458 | goto invalid; |
459 | ||
33f08261 DM |
460 | continue; |
461 | ||
47ccb028 DM |
462 | default: |
463 | goto invalid; | |
464 | } | |
465 | ||
466 | if (size < sizeof(*head)) | |
467 | goto invalid; | |
468 | ||
bb4891a6 | 469 | if (limit - offset < size) |
47ccb028 DM |
470 | goto invalid; |
471 | ||
472 | offset += size; | |
473 | } | |
474 | ||
475 | done: | |
47ccb028 DM |
476 | return 0; |
477 | ||
478 | invalid: | |
479 | return -EINVAL; | |
480 | } | |
481 | ||
2c020ed8 CM |
482 | static int restore_sigframe(struct pt_regs *regs, |
483 | struct rt_sigframe __user *sf) | |
484 | { | |
485 | sigset_t set; | |
486 | int i, err; | |
47ccb028 | 487 | struct user_ctxs user; |
2c020ed8 CM |
488 | |
489 | err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); | |
490 | if (err == 0) | |
491 | set_current_blocked(&set); | |
492 | ||
493 | for (i = 0; i < 31; i++) | |
494 | __get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i], | |
495 | err); | |
496 | __get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err); | |
497 | __get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err); | |
498 | __get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err); | |
499 | ||
500 | /* | |
501 | * Avoid sys_rt_sigreturn() restarting. | |
502 | */ | |
17c28958 | 503 | forget_syscall(regs); |
2c020ed8 | 504 | |
dbd4d7ca | 505 | err |= !valid_user_regs(®s->user_regs, current); |
47ccb028 DM |
506 | if (err == 0) |
507 | err = parse_user_sigframe(&user, sf); | |
2c020ed8 | 508 | |
8cd969d2 DM |
509 | if (err == 0) { |
510 | if (!user.fpsimd) | |
511 | return -EINVAL; | |
512 | ||
513 | if (user.sve) { | |
514 | if (!system_supports_sve()) | |
515 | return -EINVAL; | |
516 | ||
517 | err = restore_sve_fpsimd_context(&user); | |
518 | } else { | |
519 | err = restore_fpsimd_context(user.fpsimd); | |
520 | } | |
521 | } | |
2c020ed8 CM |
522 | |
523 | return err; | |
524 | } | |
525 | ||
bf4ce5cc | 526 | SYSCALL_DEFINE0(rt_sigreturn) |
2c020ed8 | 527 | { |
3085e164 | 528 | struct pt_regs *regs = current_pt_regs(); |
2c020ed8 CM |
529 | struct rt_sigframe __user *frame; |
530 | ||
531 | /* Always make any pending restarted system calls return -EINTR */ | |
f56141e3 | 532 | current->restart_block.fn = do_no_restart_syscall; |
2c020ed8 CM |
533 | |
534 | /* | |
535 | * Since we stacked the signal on a 128-bit boundary, then 'sp' should | |
536 | * be word aligned here. | |
537 | */ | |
538 | if (regs->sp & 15) | |
539 | goto badframe; | |
540 | ||
541 | frame = (struct rt_sigframe __user *)regs->sp; | |
542 | ||
96d4f267 | 543 | if (!access_ok(frame, sizeof (*frame))) |
2c020ed8 CM |
544 | goto badframe; |
545 | ||
546 | if (restore_sigframe(regs, frame)) | |
547 | goto badframe; | |
548 | ||
207bdae4 | 549 | if (restore_altstack(&frame->uc.uc_stack)) |
2c020ed8 CM |
550 | goto badframe; |
551 | ||
552 | return regs->regs[0]; | |
553 | ||
554 | badframe: | |
f71016a8 | 555 | arm64_notify_segfault(regs->sp); |
2c020ed8 CM |
556 | return 0; |
557 | } | |
558 | ||
94b07c1f DM |
559 | /* |
560 | * Determine the layout of optional records in the signal frame | |
561 | * | |
562 | * add_all: if true, lays out the biggest possible signal frame for | |
563 | * this task; otherwise, generates a layout for the current state | |
564 | * of the task. | |
565 | */ | |
566 | static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, | |
567 | bool add_all) | |
bb4891a6 | 568 | { |
bb4322f7 DM |
569 | int err; |
570 | ||
571 | err = sigframe_alloc(user, &user->fpsimd_offset, | |
572 | sizeof(struct fpsimd_context)); | |
573 | if (err) | |
574 | return err; | |
bb4891a6 DM |
575 | |
576 | /* fault information, if valid */ | |
94b07c1f | 577 | if (add_all || current->thread.fault_code) { |
bb4322f7 DM |
578 | err = sigframe_alloc(user, &user->esr_offset, |
579 | sizeof(struct esr_context)); | |
580 | if (err) | |
581 | return err; | |
bb4891a6 DM |
582 | } |
583 | ||
8cd969d2 DM |
584 | if (system_supports_sve()) { |
585 | unsigned int vq = 0; | |
586 | ||
94b07c1f DM |
587 | if (add_all || test_thread_flag(TIF_SVE)) { |
588 | int vl = sve_max_vl; | |
589 | ||
590 | if (!add_all) | |
591 | vl = current->thread.sve_vl; | |
592 | ||
593 | vq = sve_vq_from_vl(vl); | |
594 | } | |
8cd969d2 DM |
595 | |
596 | err = sigframe_alloc(user, &user->sve_offset, | |
597 | SVE_SIG_CONTEXT_SIZE(vq)); | |
598 | if (err) | |
599 | return err; | |
600 | } | |
601 | ||
33f08261 | 602 | return sigframe_alloc_end(user); |
bb4891a6 DM |
603 | } |
604 | ||
20987de3 | 605 | static int setup_sigframe(struct rt_sigframe_user_layout *user, |
2c020ed8 CM |
606 | struct pt_regs *regs, sigset_t *set) |
607 | { | |
608 | int i, err = 0; | |
20987de3 | 609 | struct rt_sigframe __user *sf = user->sigframe; |
2c020ed8 | 610 | |
304ef4e8 | 611 | /* set up the stack frame for unwinding */ |
20987de3 DM |
612 | __put_user_error(regs->regs[29], &user->next_frame->fp, err); |
613 | __put_user_error(regs->regs[30], &user->next_frame->lr, err); | |
304ef4e8 | 614 | |
2c020ed8 CM |
615 | for (i = 0; i < 31; i++) |
616 | __put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i], | |
617 | err); | |
618 | __put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err); | |
619 | __put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err); | |
620 | __put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err); | |
621 | ||
622 | __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err); | |
623 | ||
624 | err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); | |
625 | ||
0e0276d1 | 626 | if (err == 0) { |
bb4891a6 DM |
627 | struct fpsimd_context __user *fpsimd_ctx = |
628 | apply_user_offset(user, user->fpsimd_offset); | |
0e0276d1 | 629 | err |= preserve_fpsimd_context(fpsimd_ctx); |
0e0276d1 | 630 | } |
2c020ed8 | 631 | |
15af1942 | 632 | /* fault information, if valid */ |
bb4891a6 DM |
633 | if (err == 0 && user->esr_offset) { |
634 | struct esr_context __user *esr_ctx = | |
635 | apply_user_offset(user, user->esr_offset); | |
636 | ||
15af1942 CM |
637 | __put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err); |
638 | __put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err); | |
639 | __put_user_error(current->thread.fault_code, &esr_ctx->esr, err); | |
15af1942 CM |
640 | } |
641 | ||
8cd969d2 DM |
642 | /* Scalable Vector Extension state, if present */ |
643 | if (system_supports_sve() && err == 0 && user->sve_offset) { | |
644 | struct sve_context __user *sve_ctx = | |
645 | apply_user_offset(user, user->sve_offset); | |
646 | err |= preserve_sve_context(sve_ctx); | |
647 | } | |
648 | ||
33f08261 DM |
649 | if (err == 0 && user->extra_offset) { |
650 | char __user *sfp = (char __user *)user->sigframe; | |
651 | char __user *userp = | |
652 | apply_user_offset(user, user->extra_offset); | |
653 | ||
654 | struct extra_context __user *extra; | |
655 | struct _aarch64_ctx __user *end; | |
656 | u64 extra_datap; | |
657 | u32 extra_size; | |
658 | ||
659 | extra = (struct extra_context __user *)userp; | |
660 | userp += EXTRA_CONTEXT_SIZE; | |
661 | ||
662 | end = (struct _aarch64_ctx __user *)userp; | |
663 | userp += TERMINATOR_SIZE; | |
664 | ||
665 | /* | |
666 | * extra_datap is just written to the signal frame. | |
667 | * The value gets cast back to a void __user * | |
668 | * during sigreturn. | |
669 | */ | |
670 | extra_datap = (__force u64)userp; | |
671 | extra_size = sfp + round_up(user->size, 16) - userp; | |
672 | ||
673 | __put_user_error(EXTRA_MAGIC, &extra->head.magic, err); | |
674 | __put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err); | |
675 | __put_user_error(extra_datap, &extra->datap, err); | |
676 | __put_user_error(extra_size, &extra->size, err); | |
677 | ||
678 | /* Add the terminator */ | |
679 | __put_user_error(0, &end->magic, err); | |
680 | __put_user_error(0, &end->size, err); | |
681 | } | |
682 | ||
2c020ed8 | 683 | /* set the "end" magic */ |
bb4891a6 DM |
684 | if (err == 0) { |
685 | struct _aarch64_ctx __user *end = | |
686 | apply_user_offset(user, user->end_offset); | |
687 | ||
688 | __put_user_error(0, &end->magic, err); | |
689 | __put_user_error(0, &end->size, err); | |
690 | } | |
2c020ed8 CM |
691 | |
692 | return err; | |
693 | } | |
694 | ||
20987de3 DM |
695 | static int get_sigframe(struct rt_sigframe_user_layout *user, |
696 | struct ksignal *ksig, struct pt_regs *regs) | |
2c020ed8 CM |
697 | { |
698 | unsigned long sp, sp_top; | |
bb4891a6 DM |
699 | int err; |
700 | ||
701 | init_user_layout(user); | |
94b07c1f | 702 | err = setup_sigframe_layout(user, false); |
bb4891a6 DM |
703 | if (err) |
704 | return err; | |
2c020ed8 | 705 | |
38a7be3c | 706 | sp = sp_top = sigsp(regs->sp, ksig); |
2c020ed8 | 707 | |
20987de3 DM |
708 | sp = round_down(sp - sizeof(struct frame_record), 16); |
709 | user->next_frame = (struct frame_record __user *)sp; | |
710 | ||
bb4891a6 | 711 | sp = round_down(sp, 16) - sigframe_size(user); |
20987de3 | 712 | user->sigframe = (struct rt_sigframe __user *)sp; |
2c020ed8 CM |
713 | |
714 | /* | |
715 | * Check that we can actually write to the signal frame. | |
716 | */ | |
96d4f267 | 717 | if (!access_ok(user->sigframe, sp_top - sp)) |
20987de3 | 718 | return -EFAULT; |
2c020ed8 | 719 | |
20987de3 | 720 | return 0; |
2c020ed8 CM |
721 | } |
722 | ||
304ef4e8 | 723 | static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, |
20987de3 | 724 | struct rt_sigframe_user_layout *user, int usig) |
2c020ed8 | 725 | { |
2c020ed8 | 726 | __sigrestore_t sigtramp; |
2c020ed8 CM |
727 | |
728 | regs->regs[0] = usig; | |
20987de3 DM |
729 | regs->sp = (unsigned long)user->sigframe; |
730 | regs->regs[29] = (unsigned long)&user->next_frame->fp; | |
2c020ed8 CM |
731 | regs->pc = (unsigned long)ka->sa.sa_handler; |
732 | ||
733 | if (ka->sa.sa_flags & SA_RESTORER) | |
734 | sigtramp = ka->sa.sa_restorer; | |
735 | else | |
736 | sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp); | |
737 | ||
738 | regs->regs[30] = (unsigned long)sigtramp; | |
2c020ed8 CM |
739 | } |
740 | ||
00554fa4 RW |
741 | static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, |
742 | struct pt_regs *regs) | |
2c020ed8 | 743 | { |
20987de3 | 744 | struct rt_sigframe_user_layout user; |
2c020ed8 | 745 | struct rt_sigframe __user *frame; |
2c020ed8 CM |
746 | int err = 0; |
747 | ||
8cd969d2 DM |
748 | fpsimd_signal_preserve_current_state(); |
749 | ||
20987de3 | 750 | if (get_sigframe(&user, ksig, regs)) |
2c020ed8 CM |
751 | return 1; |
752 | ||
20987de3 DM |
753 | frame = user.sigframe; |
754 | ||
2c020ed8 CM |
755 | __put_user_error(0, &frame->uc.uc_flags, err); |
756 | __put_user_error(NULL, &frame->uc.uc_link, err); | |
757 | ||
207bdae4 | 758 | err |= __save_altstack(&frame->uc.uc_stack, regs->sp); |
20987de3 | 759 | err |= setup_sigframe(&user, regs, set); |
304ef4e8 | 760 | if (err == 0) { |
20987de3 | 761 | setup_return(regs, &ksig->ka, &user, usig); |
00554fa4 RW |
762 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) { |
763 | err |= copy_siginfo_to_user(&frame->info, &ksig->info); | |
304ef4e8 WD |
764 | regs->regs[1] = (unsigned long)&frame->info; |
765 | regs->regs[2] = (unsigned long)&frame->uc; | |
766 | } | |
2c020ed8 CM |
767 | } |
768 | ||
769 | return err; | |
770 | } | |
771 | ||
772 | static void setup_restart_syscall(struct pt_regs *regs) | |
773 | { | |
774 | if (is_compat_task()) | |
775 | compat_setup_restart_syscall(regs); | |
776 | else | |
777 | regs->regs[8] = __NR_restart_syscall; | |
778 | } | |
779 | ||
780 | /* | |
781 | * OK, we're invoking a handler | |
782 | */ | |
00554fa4 | 783 | static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
2c020ed8 | 784 | { |
2c020ed8 CM |
785 | struct task_struct *tsk = current; |
786 | sigset_t *oldset = sigmask_to_save(); | |
00554fa4 | 787 | int usig = ksig->sig; |
2c020ed8 CM |
788 | int ret; |
789 | ||
409d5db4 WD |
790 | rseq_signal_deliver(ksig, regs); |
791 | ||
2c020ed8 CM |
792 | /* |
793 | * Set up the stack frame | |
794 | */ | |
795 | if (is_compat_task()) { | |
00554fa4 RW |
796 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
797 | ret = compat_setup_rt_frame(usig, ksig, oldset, regs); | |
2c020ed8 | 798 | else |
00554fa4 | 799 | ret = compat_setup_frame(usig, ksig, oldset, regs); |
2c020ed8 | 800 | } else { |
00554fa4 | 801 | ret = setup_rt_frame(usig, ksig, oldset, regs); |
2c020ed8 CM |
802 | } |
803 | ||
804 | /* | |
805 | * Check that the resulting registers are actually sane. | |
806 | */ | |
dbd4d7ca | 807 | ret |= !valid_user_regs(®s->user_regs, current); |
2c020ed8 | 808 | |
2c020ed8 CM |
809 | /* |
810 | * Fast forward the stepping logic so we step into the signal | |
811 | * handler. | |
812 | */ | |
00554fa4 RW |
813 | if (!ret) |
814 | user_fastforward_single_step(tsk); | |
2c020ed8 | 815 | |
00554fa4 | 816 | signal_setup_done(ret, ksig, 0); |
2c020ed8 CM |
817 | } |
818 | ||
819 | /* | |
820 | * Note that 'init' is a special process: it doesn't get signals it doesn't | |
821 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | |
822 | * mistake. | |
823 | * | |
824 | * Note that we go through the signals twice: once to check the signals that | |
825 | * the kernel can handle, and then we build all the user-level signal handling | |
826 | * stack-frames in one go after that. | |
827 | */ | |
828 | static void do_signal(struct pt_regs *regs) | |
829 | { | |
830 | unsigned long continue_addr = 0, restart_addr = 0; | |
00554fa4 | 831 | int retval = 0; |
00554fa4 | 832 | struct ksignal ksig; |
0fe42512 | 833 | bool syscall = in_syscall(regs); |
2c020ed8 CM |
834 | |
835 | /* | |
836 | * If we were from a system call, check for system call restarting... | |
837 | */ | |
0fe42512 | 838 | if (syscall) { |
2c020ed8 CM |
839 | continue_addr = regs->pc; |
840 | restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4); | |
841 | retval = regs->regs[0]; | |
842 | ||
843 | /* | |
844 | * Avoid additional syscall restarting via ret_to_user. | |
845 | */ | |
17c28958 | 846 | forget_syscall(regs); |
2c020ed8 CM |
847 | |
848 | /* | |
849 | * Prepare for system call restart. We do this here so that a | |
850 | * debugger will see the already changed PC. | |
851 | */ | |
852 | switch (retval) { | |
853 | case -ERESTARTNOHAND: | |
854 | case -ERESTARTSYS: | |
855 | case -ERESTARTNOINTR: | |
856 | case -ERESTART_RESTARTBLOCK: | |
857 | regs->regs[0] = regs->orig_x0; | |
858 | regs->pc = restart_addr; | |
859 | break; | |
860 | } | |
861 | } | |
862 | ||
863 | /* | |
864 | * Get the signal to deliver. When running under ptrace, at this point | |
865 | * the debugger may change all of our registers. | |
866 | */ | |
00554fa4 | 867 | if (get_signal(&ksig)) { |
2c020ed8 CM |
868 | /* |
869 | * Depending on the signal settings, we may need to revert the | |
870 | * decision to restart the system call, but skip this if a | |
871 | * debugger has chosen to restart at a different PC. | |
872 | */ | |
873 | if (regs->pc == restart_addr && | |
874 | (retval == -ERESTARTNOHAND || | |
875 | retval == -ERESTART_RESTARTBLOCK || | |
876 | (retval == -ERESTARTSYS && | |
00554fa4 | 877 | !(ksig.ka.sa.sa_flags & SA_RESTART)))) { |
2c020ed8 CM |
878 | regs->regs[0] = -EINTR; |
879 | regs->pc = continue_addr; | |
880 | } | |
881 | ||
00554fa4 | 882 | handle_signal(&ksig, regs); |
2c020ed8 CM |
883 | return; |
884 | } | |
885 | ||
886 | /* | |
887 | * Handle restarting a different system call. As above, if a debugger | |
888 | * has chosen to restart at a different PC, ignore the restart. | |
889 | */ | |
0fe42512 | 890 | if (syscall && regs->pc == restart_addr) { |
2c020ed8 CM |
891 | if (retval == -ERESTART_RESTARTBLOCK) |
892 | setup_restart_syscall(regs); | |
893 | user_rewind_single_step(current); | |
894 | } | |
895 | ||
896 | restore_saved_sigmask(); | |
897 | } | |
898 | ||
899 | asmlinkage void do_notify_resume(struct pt_regs *regs, | |
3eb6f1f9 | 900 | unsigned long thread_flags) |
2c020ed8 | 901 | { |
421dd6fa CM |
902 | /* |
903 | * The assembly code enters us with IRQs off, but it hasn't | |
904 | * informed the tracing code of that for efficiency reasons. | |
905 | * Update the trace code with the current status. | |
906 | */ | |
907 | trace_hardirqs_off(); | |
cf7de27a | 908 | |
421dd6fa | 909 | do { |
a2048e34 TG |
910 | /* Check valid user FS if needed */ |
911 | addr_limit_user_check(); | |
912 | ||
421dd6fa | 913 | if (thread_flags & _TIF_NEED_RESCHED) { |
8d66772e JM |
914 | /* Unmask Debug and SError for the next task */ |
915 | local_daif_restore(DAIF_PROCCTX_NOIRQ); | |
916 | ||
421dd6fa CM |
917 | schedule(); |
918 | } else { | |
8d66772e | 919 | local_daif_restore(DAIF_PROCCTX); |
421dd6fa | 920 | |
9842ceae PA |
921 | if (thread_flags & _TIF_UPROBE) |
922 | uprobe_notify_resume(regs); | |
923 | ||
421dd6fa CM |
924 | if (thread_flags & _TIF_SIGPENDING) |
925 | do_signal(regs); | |
926 | ||
927 | if (thread_flags & _TIF_NOTIFY_RESUME) { | |
928 | clear_thread_flag(TIF_NOTIFY_RESUME); | |
929 | tracehook_notify_resume(regs); | |
409d5db4 | 930 | rseq_handle_notify_resume(NULL, regs); |
421dd6fa CM |
931 | } |
932 | ||
933 | if (thread_flags & _TIF_FOREIGN_FPSTATE) | |
934 | fpsimd_restore_current_state(); | |
935 | } | |
005f78cd | 936 | |
8d66772e | 937 | local_daif_mask(); |
421dd6fa CM |
938 | thread_flags = READ_ONCE(current_thread_info()->flags); |
939 | } while (thread_flags & _TIF_WORK_MASK); | |
2c020ed8 | 940 | } |
94b07c1f DM |
941 | |
942 | unsigned long __ro_after_init signal_minsigstksz; | |
943 | ||
944 | /* | |
945 | * Determine the stack space required for guaranteed signal devliery. | |
946 | * This function is used to populate AT_MINSIGSTKSZ at process startup. | |
947 | * cpufeatures setup is assumed to be complete. | |
948 | */ | |
949 | void __init minsigstksz_setup(void) | |
950 | { | |
951 | struct rt_sigframe_user_layout user; | |
952 | ||
953 | init_user_layout(&user); | |
954 | ||
955 | /* | |
956 | * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't | |
957 | * be big enough, but it's our best guess: | |
958 | */ | |
959 | if (WARN_ON(setup_sigframe_layout(&user, true))) | |
960 | return; | |
961 | ||
962 | signal_minsigstksz = sigframe_size(&user) + | |
963 | round_up(sizeof(struct frame_record), 16) + | |
964 | 16; /* max alignment padding */ | |
965 | } |