// SPDX-License-Identifier: GPL-2.0
/*
 *  fs/signalfd.c
 *
 *  Copyright (C) 2003 Linus Torvalds
 *
 *  Mon Mar 5, 2007: Davide Libenzi <davidel@xmailserver.org>
 *      Changed ->read() to return a siginfo structure instead of signal number.
 *      Fixed locking in ->poll().
 *      Added sighand-detach notification.
 *      Added fd re-use in sys_signalfd() syscall.
 *      Now using anonymous inode source.
 *      Thanks to Oleg Nesterov for useful code review and suggestions.
 *      More comments and suggestions from Arnd Bergmann.
 *  Sat May 19, 2007: Davi E. M. Arnaut <davi@haxent.com.br>
 *      Retrieve multiple signals with one read() call
 *  Sun Jul 15, 2007: Davide Libenzi <davidel@xmailserver.org>
 *      Attach to the sighand only during read() and poll().
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/signalfd.h>
#include <linux/syscalls.h>
#include <linux/proc_fs.h>
#include <linux/compat.h>

void signalfd_cleanup(struct sighand_struct *sighand)
{
        wake_up_pollfree(&sighand->signalfd_wqh);
}

struct signalfd_ctx {
        sigset_t sigmask;
};

static int signalfd_release(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}

static __poll_t signalfd_poll(struct file *file, poll_table *wait)
{
        struct signalfd_ctx *ctx = file->private_data;
        __poll_t events = 0;

        poll_wait(file, &current->sighand->signalfd_wqh, wait);

        spin_lock_irq(&current->sighand->siglock);
        if (next_signal(&current->pending, &ctx->sigmask) ||
            next_signal(&current->signal->shared_pending,
                        &ctx->sigmask))
                events |= EPOLLIN;
        spin_unlock_irq(&current->sighand->siglock);

        return events;
}

/*
 * Copied from copy_siginfo_to_user() in kernel/signal.c
 */
static int signalfd_copyinfo(struct iov_iter *to, kernel_siginfo_t const *kinfo)
{
        struct signalfd_siginfo new;

        BUILD_BUG_ON(sizeof(struct signalfd_siginfo) != 128);

        /*
         * Unused members should be zero ...
         */
        memset(&new, 0, sizeof(new));

        /*
         * If you change the siginfo_t structure, please be sure
         * this code is fixed accordingly.
         */
        new.ssi_signo = kinfo->si_signo;
        new.ssi_errno = kinfo->si_errno;
        new.ssi_code = kinfo->si_code;
        switch (siginfo_layout(kinfo->si_signo, kinfo->si_code)) {
        case SIL_KILL:
                new.ssi_pid = kinfo->si_pid;
                new.ssi_uid = kinfo->si_uid;
                break;
        case SIL_TIMER:
                new.ssi_tid = kinfo->si_tid;
                new.ssi_overrun = kinfo->si_overrun;
                new.ssi_ptr = (long) kinfo->si_ptr;
                new.ssi_int = kinfo->si_int;
                break;
        case SIL_POLL:
                new.ssi_band = kinfo->si_band;
                new.ssi_fd = kinfo->si_fd;
                break;
        case SIL_FAULT_BNDERR:
        case SIL_FAULT_PKUERR:
        case SIL_FAULT_PERF_EVENT:
                /*
                 * Fall through to the SIL_FAULT case. SIL_FAULT_BNDERR,
                 * SIL_FAULT_PKUERR, and SIL_FAULT_PERF_EVENT are only
                 * generated by faults that deliver them synchronously to
                 * userspace. In case someone injects one of these signals
                 * and signalfd catches it, treat it as SIL_FAULT.
                 */
        case SIL_FAULT:
                new.ssi_addr = (long) kinfo->si_addr;
                break;
        case SIL_FAULT_TRAPNO:
                new.ssi_addr = (long) kinfo->si_addr;
                new.ssi_trapno = kinfo->si_trapno;
                break;
        case SIL_FAULT_MCEERR:
                new.ssi_addr = (long) kinfo->si_addr;
                new.ssi_addr_lsb = (short) kinfo->si_addr_lsb;
                break;
        case SIL_CHLD:
                new.ssi_pid = kinfo->si_pid;
                new.ssi_uid = kinfo->si_uid;
                new.ssi_status = kinfo->si_status;
                new.ssi_utime = kinfo->si_utime;
                new.ssi_stime = kinfo->si_stime;
                break;
        case SIL_RT:
                /*
                 * This case also catches the signals queued by sigqueue().
                 */
                new.ssi_pid = kinfo->si_pid;
                new.ssi_uid = kinfo->si_uid;
                new.ssi_ptr = (long) kinfo->si_ptr;
                new.ssi_int = kinfo->si_int;
                break;
        case SIL_SYS:
                new.ssi_call_addr = (long) kinfo->si_call_addr;
                new.ssi_syscall = kinfo->si_syscall;
                new.ssi_arch = kinfo->si_arch;
                break;
        }

        if (!copy_to_iter_full(&new, sizeof(struct signalfd_siginfo), to))
                return -EFAULT;

        return sizeof(struct signalfd_siginfo);
}

static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info,
                                int nonblock)
{
        enum pid_type type;
        ssize_t ret;
        DECLARE_WAITQUEUE(wait, current);

        spin_lock_irq(&current->sighand->siglock);
        ret = dequeue_signal(current, &ctx->sigmask, info, &type);
        switch (ret) {
        case 0:
                if (!nonblock)
                        break;
                ret = -EAGAIN;
                fallthrough;
        default:
                spin_unlock_irq(&current->sighand->siglock);
                return ret;
        }

        add_wait_queue(&current->sighand->signalfd_wqh, &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                ret = dequeue_signal(current, &ctx->sigmask, info, &type);
                if (ret != 0)
                        break;
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                spin_unlock_irq(&current->sighand->siglock);
                schedule();
                spin_lock_irq(&current->sighand->siglock);
        }
        spin_unlock_irq(&current->sighand->siglock);

        remove_wait_queue(&current->sighand->signalfd_wqh, &wait);
        __set_current_state(TASK_RUNNING);

        return ret;
}

/*
 * Returns a multiple of the size of a "struct signalfd_siginfo", or a negative
 * error code. The "count" parameter must be at least the size of a
 * "struct signalfd_siginfo".
 */
static ssize_t signalfd_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct signalfd_ctx *ctx = file->private_data;
        size_t count = iov_iter_count(to);
        ssize_t ret, total = 0;
        kernel_siginfo_t info;
        bool nonblock;

        count /= sizeof(struct signalfd_siginfo);
        if (!count)
                return -EINVAL;

        nonblock = file->f_flags & O_NONBLOCK || iocb->ki_flags & IOCB_NOWAIT;
        do {
                ret = signalfd_dequeue(ctx, &info, nonblock);
                if (unlikely(ret <= 0))
                        break;
                ret = signalfd_copyinfo(to, &info);
                if (ret < 0)
                        break;
                total += ret;
                nonblock = 1;
        } while (--count);

        return total ? total : ret;
}

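/*
 * Illustrative userspace read loop (not part of the kernel build): a minimal
 * sketch, assuming glibc's <sys/signalfd.h> definitions and a signalfd
 * descriptor "sfd" created as in the usage sketch at the end of this file.
 * Because signalfd_read_iter() returns a multiple of
 * sizeof(struct signalfd_siginfo), one read() into a larger buffer can drain
 * several queued signals at once:
 *
 *      struct signalfd_siginfo fdsi[8];
 *      ssize_t n = read(sfd, fdsi, sizeof(fdsi));
 *
 *      if (n > 0) {
 *              for (size_t i = 0; i < (size_t)n / sizeof(fdsi[0]); i++)
 *                      printf("signal %u from pid %u\n",
 *                             fdsi[i].ssi_signo, fdsi[i].ssi_pid);
 *      }
 */
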
#ifdef CONFIG_PROC_FS
static void signalfd_show_fdinfo(struct seq_file *m, struct file *f)
{
        struct signalfd_ctx *ctx = f->private_data;
        sigset_t sigmask;

        sigmask = ctx->sigmask;
        signotset(&sigmask);
        render_sigset_t(m, "sigmask:\t", &sigmask);
}
#endif

static const struct file_operations signalfd_fops = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo    = signalfd_show_fdinfo,
#endif
        .release        = signalfd_release,
        .poll           = signalfd_poll,
        .read_iter      = signalfd_read_iter,
        .llseek         = noop_llseek,
};

static int do_signalfd4(int ufd, sigset_t *mask, int flags)
{
        struct signalfd_ctx *ctx;

        /* Check the SFD_* constants for consistency. */
        BUILD_BUG_ON(SFD_CLOEXEC != O_CLOEXEC);
        BUILD_BUG_ON(SFD_NONBLOCK != O_NONBLOCK);

        if (flags & ~(SFD_CLOEXEC | SFD_NONBLOCK))
                return -EINVAL;

        sigdelsetmask(mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
        signotset(mask);

        if (ufd == -1) {
                struct file *file;

                ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
                if (!ctx)
                        return -ENOMEM;

                ctx->sigmask = *mask;

                ufd = get_unused_fd_flags(flags & O_CLOEXEC);
                if (ufd < 0) {
                        kfree(ctx);
                        return ufd;
                }

                file = anon_inode_getfile("[signalfd]", &signalfd_fops, ctx,
                                          O_RDWR | (flags & O_NONBLOCK));
                if (IS_ERR(file)) {
                        put_unused_fd(ufd);
                        kfree(ctx);
                        return PTR_ERR(file);
                }
                file->f_mode |= FMODE_NOWAIT;

                fd_install(ufd, file);
        } else {
                struct fd f = fdget(ufd);
                if (!f.file)
                        return -EBADF;
                ctx = f.file->private_data;
                if (f.file->f_op != &signalfd_fops) {
                        fdput(f);
                        return -EINVAL;
                }
                spin_lock_irq(&current->sighand->siglock);
                ctx->sigmask = *mask;
                spin_unlock_irq(&current->sighand->siglock);

                wake_up(&current->sighand->signalfd_wqh);
                fdput(f);
        }

        return ufd;
}

SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
                size_t, sizemask, int, flags)
{
        sigset_t mask;

        if (sizemask != sizeof(sigset_t))
                return -EINVAL;
        if (copy_from_user(&mask, user_mask, sizeof(mask)))
                return -EFAULT;
        return do_signalfd4(ufd, &mask, flags);
}

SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
                size_t, sizemask)
{
        sigset_t mask;

        if (sizemask != sizeof(sigset_t))
                return -EINVAL;
        if (copy_from_user(&mask, user_mask, sizeof(mask)))
                return -EFAULT;
        return do_signalfd4(ufd, &mask, 0);
}

#ifdef CONFIG_COMPAT
static long do_compat_signalfd4(int ufd,
                                const compat_sigset_t __user *user_mask,
                                compat_size_t sigsetsize, int flags)
{
        sigset_t mask;

        if (sigsetsize != sizeof(compat_sigset_t))
                return -EINVAL;
        if (get_compat_sigset(&mask, user_mask))
                return -EFAULT;
        return do_signalfd4(ufd, &mask, flags);
}

COMPAT_SYSCALL_DEFINE4(signalfd4, int, ufd,
                       const compat_sigset_t __user *, user_mask,
                       compat_size_t, sigsetsize,
                       int, flags)
{
        return do_compat_signalfd4(ufd, user_mask, sigsetsize, flags);
}

COMPAT_SYSCALL_DEFINE3(signalfd, int, ufd,
                       const compat_sigset_t __user *, user_mask,
                       compat_size_t, sigsetsize)
{
        return do_compat_signalfd4(ufd, user_mask, sigsetsize, 0);
}
#endif
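
/*
 * Illustrative userspace usage (not part of the kernel build): a minimal
 * sketch, assuming glibc's signalfd(2) wrapper from <sys/signalfd.h>. The
 * signals of interest are first blocked with sigprocmask() so they are not
 * delivered the usual way, then the same mask is handed to signalfd(); each
 * queued signal is afterwards read back as one "struct signalfd_siginfo".
 *
 *      #include <sys/signalfd.h>
 *      #include <signal.h>
 *      #include <unistd.h>
 *      #include <stdio.h>
 *      #include <stdlib.h>
 *
 *      int main(void)
 *      {
 *              sigset_t mask;
 *              struct signalfd_siginfo fdsi;
 *
 *              sigemptyset(&mask);
 *              sigaddset(&mask, SIGINT);
 *              sigaddset(&mask, SIGQUIT);
 *
 *              if (sigprocmask(SIG_BLOCK, &mask, NULL) == -1)
 *                      exit(EXIT_FAILURE);
 *
 *              int sfd = signalfd(-1, &mask, SFD_CLOEXEC);
 *              if (sfd == -1)
 *                      exit(EXIT_FAILURE);
 *
 *              for (;;) {
 *                      ssize_t n = read(sfd, &fdsi, sizeof(fdsi));
 *                      if (n != sizeof(fdsi))
 *                              exit(EXIT_FAILURE);
 *                      if (fdsi.ssi_signo == SIGINT)
 *                              printf("got SIGINT\n");
 *                      else if (fdsi.ssi_signo == SIGQUIT)
 *                              return 0;
 *              }
 *      }
 */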