// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/err.h>
#include <asm/asm-prototypes.h>
#include <asm/book3s/64/kup-radix.h>
#include <asm/cputime.h>
#include <asm/hw_irq.h>
#include <asm/kprobes.h>
#include <asm/paca.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
#include <asm/unistd.h>

typedef long (*syscall_fn)(long, long, long, long, long, long);

/* Has to run notrace because it is entered not completely "reconciled" */
notrace long system_call_exception(long r3, long r4, long r5,
				   long r6, long r7, long r8,
				   unsigned long r0, struct pt_regs *regs)
{
	syscall_fn f;

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);

	trace_hardirqs_off(); /* finish reconciling */

	if (IS_ENABLED(CONFIG_PPC_BOOK3S))
		BUG_ON(!(regs->msr & MSR_RI));
	BUG_ON(!(regs->msr & MSR_PR));
	BUG_ON(!FULL_REGS(regs));
	BUG_ON(regs->softe != IRQS_ENABLED);

	account_cpu_user_entry();

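	/*
	 * On shared-processor LPARs, compare our cached dispatch trace log
	 * read index with the write index the hypervisor maintains in the
	 * lppaca; unread entries mean we were preempted, so account the
	 * stolen time.
	 */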
#ifdef CONFIG_PPC_SPLPAR
	if (IS_ENABLED(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) &&
	    firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct lppaca *lp = local_paca->lppaca_ptr;

		if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx)))
			accumulate_stolen_time();
	}
#endif

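	/* Debug check (KUAP): the AMR should still be blocking user access. */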
	kuap_check_amr();

	/*
	 * This is not required for the syscall exit path, but makes the
	 * stack frame look nicer. If this was initialised in the first stack
	 * frame, or if the unwinder was taught the first stack frame always
	 * returns to user with IRQS_ENABLED, this store could be avoided!
	 */
	regs->softe = IRQS_ENABLED;

	local_irq_enable();

	if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
		/*
		 * We use the return value of do_syscall_trace_enter() as the
		 * syscall number. If the syscall was rejected for any reason,
		 * do_syscall_trace_enter() returns an invalid syscall number,
		 * the test against NR_syscalls fails, and the value to return
		 * is taken from regs->gpr[3].
		 */
		r0 = do_syscall_trace_enter(regs);
		if (unlikely(r0 >= NR_syscalls))
			return regs->gpr[3];
		r3 = regs->gpr[3];
		r4 = regs->gpr[4];
		r5 = regs->gpr[5];
		r6 = regs->gpr[6];
		r7 = regs->gpr[7];
		r8 = regs->gpr[8];

	} else if (unlikely(r0 >= NR_syscalls)) {
		return -ENOSYS;
	}

	/* May be faster to do array_index_nospec? */
	barrier_nospec();

	if (unlikely(is_32bit_task())) {
		f = (void *)compat_sys_call_table[r0];

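		/*
		 * Compat syscalls take 32-bit arguments: zero-extend them so
		 * stale upper bits in the 64-bit GPRs cannot leak through.
		 */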
		r3 &= 0x00000000ffffffffULL;
		r4 &= 0x00000000ffffffffULL;
		r5 &= 0x00000000ffffffffULL;
		r6 &= 0x00000000ffffffffULL;
		r7 &= 0x00000000ffffffffULL;
		r8 &= 0x00000000ffffffffULL;

	} else {
		f = (void *)sys_call_table[r0];
	}

	return f(r3, r4, r5, r6, r7, r8);
}

/*
 * This should be called after a syscall returns, with r3 the return value
 * from the syscall. If this function returns non-zero, the system call
 * exit assembly should additionally load all GPRs, CTR and XER from the
 * interrupt frame.
 *
 * The function graph tracer cannot trace the return side of this function,
 * because RI=0 and the soft mask state is "unreconciled", so it is marked
 * notrace.
 */
notrace unsigned long syscall_exit_prepare(unsigned long r3,
					   struct pt_regs *regs)
{
	unsigned long *ti_flagsp = &current_thread_info()->flags;
	unsigned long ti_flags;
	unsigned long ret = 0;

	regs->result = r3;

	/* Check whether the syscall is issued inside a restartable sequence */
	rseq_syscall(regs);

	ti_flags = *ti_flagsp;

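	/*
	 * Return values in [-MAX_ERRNO, -1] are errors: hand userspace the
	 * positive errno and flag failure via the CR0 SO bit, per the powerpc
	 * syscall ABI, unless the syscall handles its own error reporting.
	 */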
	if (unlikely(r3 >= (unsigned long)-MAX_ERRNO)) {
		if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
			r3 = -r3;
			regs->ccr |= 0x10000000; /* Set SO bit in CR */
		}
	}

	if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
		if (ti_flags & _TIF_RESTOREALL)
			ret = _TIF_RESTOREALL;
		else
			regs->gpr[3] = r3;
		clear_bits(_TIF_PERSYSCALL_MASK, ti_flagsp);
	} else {
		regs->gpr[3] = r3;
	}

	if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
		do_syscall_trace_leave(regs);
		ret |= _TIF_RESTOREALL;
	}

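	/*
	 * Drain user exit work (reschedule, signals, etc.) with interrupts
	 * enabled, then re-check with them disabled so nothing new can slip
	 * in between the final check and the return to userspace.
	 */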
again:
	local_irq_disable();
	ti_flags = READ_ONCE(*ti_flagsp);
	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
		local_irq_enable();
		if (ti_flags & _TIF_NEED_RESCHED) {
			schedule();
		} else {
			/*
			 * SIGPENDING must restore signal handler function
			 * argument GPRs, and some non-volatiles (e.g., r1).
			 * Restore all for now. This could be made lighter.
			 */
			if (ti_flags & _TIF_SIGPENDING)
				ret |= _TIF_RESTOREALL;
			do_notify_resume(regs, ti_flags);
		}
		local_irq_disable();
		ti_flags = READ_ONCE(*ti_flagsp);
	}

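	/*
	 * If the thread has FP/VEC/VSX state that is not live in regs->msr
	 * (or transactional state to recheckpoint), restore it now rather
	 * than taking a facility-unavailable fault back in userspace.
	 */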
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
		    unlikely((ti_flags & _TIF_RESTORE_TM))) {
			restore_tm_state(regs);
		} else {
			unsigned long mathflags = MSR_FP;

			if (cpu_has_feature(CPU_FTR_VSX))
				mathflags |= MSR_VEC | MSR_VSX;
			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
				mathflags |= MSR_VEC;

			if ((regs->msr & mathflags) != mathflags)
				restore_math(regs);
		}
	}

	/* This must be done with RI=1 because tracing may touch vmaps */
	trace_hardirqs_on();

	/* This pattern matches prep_irq_for_idle */
	__hard_EE_RI_disable();
	if (unlikely(lazy_irq_pending())) {
		__hard_RI_enable();
		trace_hardirqs_off();
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
		local_irq_enable();
		/* Took an interrupt, may have more exit work to do. */
		goto again;
	}
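	/*
	 * Nothing pending: leave with interrupts hard-disabled but marked
	 * soft-enabled; the return to userspace re-enables them in hardware.
	 */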
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	kuap_check_amr();

	account_cpu_user_exit();

	return ret;
}

#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
{
#ifdef CONFIG_PPC_BOOK3E
	struct thread_struct *ts = &current->thread;
#endif
	unsigned long *ti_flagsp = &current_thread_info()->flags;
	unsigned long ti_flags;
	unsigned long flags;
	unsigned long ret = 0;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S))
		BUG_ON(!(regs->msr & MSR_RI));
	BUG_ON(!(regs->msr & MSR_PR));
	BUG_ON(!FULL_REGS(regs));
	BUG_ON(regs->softe != IRQS_ENABLED);

	local_irq_save(flags);

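	/*
	 * Same exit-work loop as syscall_exit_prepare: handle reschedule and
	 * signal work with interrupts enabled, re-check with them disabled.
	 */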
again:
	ti_flags = READ_ONCE(*ti_flagsp);
	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
		local_irq_enable(); /* returning to user: may enable */
		if (ti_flags & _TIF_NEED_RESCHED) {
			schedule();
		} else {
			if (ti_flags & _TIF_SIGPENDING)
				ret |= _TIF_RESTOREALL;
			do_notify_resume(regs, ti_flags);
		}
		local_irq_disable();
		ti_flags = READ_ONCE(*ti_flagsp);
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
		    unlikely((ti_flags & _TIF_RESTORE_TM))) {
			restore_tm_state(regs);
		} else {
			unsigned long mathflags = MSR_FP;

			if (cpu_has_feature(CPU_FTR_VSX))
				mathflags |= MSR_VEC | MSR_VSX;
			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
				mathflags |= MSR_VEC;

			if ((regs->msr & mathflags) != mathflags)
				restore_math(regs);
		}
	}

	trace_hardirqs_on();
	__hard_EE_RI_disable();
	if (unlikely(lazy_irq_pending())) {
		__hard_RI_enable();
		trace_hardirqs_off();
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
		local_irq_enable();
		local_irq_disable();
		/* Took an interrupt, may have more exit work to do. */
		goto again;
	}
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);

#ifdef CONFIG_PPC_BOOK3E
	if (unlikely(ts->debug.dbcr0 & DBCR0_IDM)) {
		/*
		 * Check to see if the dbcr0 register is set up to debug.
		 * Use the internal debug mode bit to do this.
		 */
		mtmsr(mfmsr() & ~MSR_DE);
		mtspr(SPRN_DBCR0, ts->debug.dbcr0);
		mtspr(SPRN_DBSR, -1);
	}
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	kuap_check_amr();

	account_cpu_user_exit();

	return ret;
}

void unrecoverable_exception(struct pt_regs *regs);
void preempt_schedule_irq(void);

notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
{
	unsigned long *ti_flagsp = &current_thread_info()->flags;
	unsigned long flags;
	unsigned long ret = 0;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && unlikely(!(regs->msr & MSR_RI)))
		unrecoverable_exception(regs);
	BUG_ON(regs->msr & MSR_PR);
	BUG_ON(!FULL_REGS(regs));

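	/*
	 * A store into this exception frame's stack area was emulated (e.g.
	 * by instruction emulation); return 1 so the low-level exit code can
	 * complete the deferred store when it unwinds the frame.
	 */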
	if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
		clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
		ret = 1;
	}

	local_irq_save(flags);

	if (regs->softe == IRQS_ENABLED) {
		/* Returning to a kernel context with local irqs enabled. */
		WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
		if (IS_ENABLED(CONFIG_PREEMPT)) {
			/* Return to preemptible kernel context */
			if (unlikely(*ti_flagsp & _TIF_NEED_RESCHED)) {
				if (preempt_count() == 0)
					preempt_schedule_irq();
			}
		}

		trace_hardirqs_on();
		__hard_EE_RI_disable();
		if (unlikely(lazy_irq_pending())) {
			__hard_RI_enable();
			irq_soft_mask_set(IRQS_ALL_DISABLED);
			trace_hardirqs_off();
			local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
			/*
			 * Can't local_irq_restore to replay if we were in
			 * interrupt context. Must replay directly.
			 */
			if (irqs_disabled_flags(flags)) {
				replay_soft_interrupts();
			} else {
				local_irq_restore(flags);
				local_irq_save(flags);
			}
			/* Took an interrupt, may have more exit work to do. */
			goto again;
		}
		local_paca->irq_happened = 0;
		irq_soft_mask_set(IRQS_ENABLED);
	} else {
		/* Returning to a kernel context with local irqs disabled. */
		__hard_EE_RI_disable();
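		/*
		 * The interrupted context ran with MSR[EE] set, so returning
		 * re-enables interrupts in hardware: drop the HARD_DIS flag
		 * to keep the soft-mask state consistent.
		 */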
		if (regs->msr & MSR_EE)
			local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	kuap_restore_amr(regs);

	return ret;
}
#endif