// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/eventfd.c
 *
 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/idr.h>
#include <linux/uio.h>

static DEFINE_IDA(eventfd_ida);

struct eventfd_ctx {
        struct kref kref;
        wait_queue_head_t wqh;
        /*
         * Every time that a write(2) is performed on an eventfd, the
         * value of the __u64 being written is added to "count" and a
         * wakeup is performed on "wqh". A read(2) will return the "count"
         * value to userspace, and will reset "count" to zero. The kernel
         * side eventfd_signal() also adds to the "count" counter and
         * issues a wakeup.
         */
        __u64 count;
        unsigned int flags;
        int id;
};
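
/*
 * Illustrative userspace sketch (not part of this file): the counter
 * semantics described above, as seen through the eventfd(2) syscall.
 * Error handling is omitted for brevity.
 *
 *      #include <sys/eventfd.h>
 *      #include <unistd.h>
 *      #include <stdint.h>
 *
 *      int efd = eventfd(0, EFD_CLOEXEC);
 *      uint64_t add = 3, val;
 *
 *      write(efd, &add, sizeof(add));  // count = 3
 *      add = 4;
 *      write(efd, &add, sizeof(add));  // count = 7
 *      read(efd, &val, sizeof(val));   // val = 7, count reset to 0
 *
 *      // A further read() now blocks (or fails with EAGAIN under
 *      // EFD_NONBLOCK) until the counter becomes non-zero again.
 *      // With EFD_SEMAPHORE, each read() instead returns 1 and
 *      // decrements the counter by 1.
 */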

/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value of the counter to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning EPOLLERR
 * to poll(2).
 *
 * Returns the amount by which the counter was incremented. This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
        unsigned long flags;

        /*
         * Deadlock or stack overflow issues can happen if we recurse here
         * through waitqueue wakeup handlers. If the caller uses potentially
         * nested waitqueues with custom wakeup handlers, then it should
         * check eventfd_signal_allowed() before calling this function. If
         * it returns false, the eventfd_signal() call should be deferred to a
         * safe context.
         */
        if (WARN_ON_ONCE(current->in_eventfd))
                return 0;

        spin_lock_irqsave(&ctx->wqh.lock, flags);
        current->in_eventfd = 1;
        if (ULLONG_MAX - ctx->count < n)
                n = ULLONG_MAX - ctx->count;
        ctx->count += n;
        if (waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, EPOLLIN);
        current->in_eventfd = 0;
        spin_unlock_irqrestore(&ctx->wqh.lock, flags);

        return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);
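
/*
 * Illustrative sketch of a kernel-side producer (not part of this file):
 * a driver typically holds an eventfd_ctx handed over from userspace and
 * pokes it with eventfd_signal(), for example from an interrupt handler.
 * The structure and function names below are hypothetical.
 *
 *      struct example_dev {
 *              struct eventfd_ctx *trigger;    // set up via eventfd_ctx_fdget()
 *      };
 *
 *      static irqreturn_t example_irq(int irq, void *data)
 *      {
 *              struct example_dev *dev = data;
 *
 *              // Atomic context is fine; adds 1 and wakes pollers with EPOLLIN.
 *              if (dev->trigger)
 *                      eventfd_signal(dev->trigger, 1);
 *              return IRQ_HANDLED;
 *      }
 */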

static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
        if (ctx->id >= 0)
                ida_simple_remove(&eventfd_ida, ctx->id);
        kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
        struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

        eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
        kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
        struct eventfd_ctx *ctx = file->private_data;

        wake_up_poll(&ctx->wqh, EPOLLHUP);
        eventfd_ctx_put(ctx);
        return 0;
}

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
        struct eventfd_ctx *ctx = file->private_data;
        __poll_t events = 0;
        u64 count;

        poll_wait(file, &ctx->wqh, wait);

        /*
         * All writes to ctx->count occur within ctx->wqh.lock. This read
         * can be done outside ctx->wqh.lock because we know that poll_wait
         * takes that lock (through add_wait_queue) if our caller will sleep.
         *
         * The read _can_ therefore seep into add_wait_queue's critical
         * section, but cannot move above it! add_wait_queue's spin_lock acts
         * as an acquire barrier and ensures that the read be ordered properly
         * against the writes. The following CAN happen and is safe:
         *
         *     poll                               write
         *     -----------------                  ------------
         *     lock ctx->wqh.lock (in poll_wait)
         *     count = ctx->count
         *     __add_wait_queue
         *     unlock ctx->wqh.lock
         *                                        lock ctx->wqh.lock
         *                                        ctx->count += n
         *                                        if (waitqueue_active)
         *                                          wake_up_locked_poll
         *                                        unlock ctx->wqh.lock
         *     eventfd_poll returns 0
         *
         * but the following, which would miss a wakeup, cannot happen:
         *
         *     poll                               write
         *     -----------------                  ------------
         *     count = ctx->count (INVALID!)
         *                                        lock ctx->wqh.lock
         *                                        ctx->count += n
         *                                        **waitqueue_active is false**
         *                                        **no wake_up_locked_poll!**
         *                                        unlock ctx->wqh.lock
         *     lock ctx->wqh.lock (in poll_wait)
         *     __add_wait_queue
         *     unlock ctx->wqh.lock
         *     eventfd_poll returns 0
         */
        count = READ_ONCE(ctx->count);

        if (count > 0)
                events |= EPOLLIN;
        if (count == ULLONG_MAX)
                events |= EPOLLERR;
        if (ULLONG_MAX - 1 > count)
                events |= EPOLLOUT;

        return events;
}

void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
        lockdep_assert_held(&ctx->wqh.lock);

        *cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
        ctx->count -= *cnt;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);

/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue entry.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error codes:
 *
 * -EAGAIN      : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
                                  __u64 *cnt)
{
        unsigned long flags;

        spin_lock_irqsave(&ctx->wqh.lock, flags);
        eventfd_ctx_do_read(ctx, cnt);
        __remove_wait_queue(&ctx->wqh, wait);
        if (*cnt != 0 && waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
        spin_unlock_irqrestore(&ctx->wqh.lock, flags);

        return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
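
/*
 * Illustrative sketch (not part of this file): a consumer that registered
 * its own wait queue entry on the eventfd can tear it down and drain any
 * pending count atomically. The names below are hypothetical.
 *
 *      u64 cnt;
 *
 *      if (eventfd_ctx_remove_wait_queue(ctx, &consumer_wait, &cnt) == 0)
 *              ;       // 'cnt' events were pending and are now consumed
 *      else
 *              ;       // -EAGAIN: the counter was zero, nothing was pending
 */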

static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct eventfd_ctx *ctx = file->private_data;
        __u64 ucnt = 0;
        DECLARE_WAITQUEUE(wait, current);

        if (iov_iter_count(to) < sizeof(ucnt))
                return -EINVAL;
        spin_lock_irq(&ctx->wqh.lock);
        if (!ctx->count) {
                if ((file->f_flags & O_NONBLOCK) ||
                    (iocb->ki_flags & IOCB_NOWAIT)) {
                        spin_unlock_irq(&ctx->wqh.lock);
                        return -EAGAIN;
                }
                __add_wait_queue(&ctx->wqh, &wait);
                for (;;) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (ctx->count)
                                break;
                        if (signal_pending(current)) {
                                __remove_wait_queue(&ctx->wqh, &wait);
                                __set_current_state(TASK_RUNNING);
                                spin_unlock_irq(&ctx->wqh.lock);
                                return -ERESTARTSYS;
                        }
                        spin_unlock_irq(&ctx->wqh.lock);
                        schedule();
                        spin_lock_irq(&ctx->wqh.lock);
                }
                __remove_wait_queue(&ctx->wqh, &wait);
                __set_current_state(TASK_RUNNING);
        }
        eventfd_ctx_do_read(ctx, &ucnt);
        current->in_eventfd = 1;
        if (waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
        current->in_eventfd = 0;
        spin_unlock_irq(&ctx->wqh.lock);
        if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
                return -EFAULT;

        return sizeof(ucnt);
}

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
                             loff_t *ppos)
{
        struct eventfd_ctx *ctx = file->private_data;
        ssize_t res;
        __u64 ucnt;
        DECLARE_WAITQUEUE(wait, current);

        if (count < sizeof(ucnt))
                return -EINVAL;
        if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
                return -EFAULT;
        if (ucnt == ULLONG_MAX)
                return -EINVAL;
        spin_lock_irq(&ctx->wqh.lock);
        res = -EAGAIN;
        if (ULLONG_MAX - ctx->count > ucnt)
                res = sizeof(ucnt);
        else if (!(file->f_flags & O_NONBLOCK)) {
                __add_wait_queue(&ctx->wqh, &wait);
                for (res = 0;;) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (ULLONG_MAX - ctx->count > ucnt) {
                                res = sizeof(ucnt);
                                break;
                        }
                        if (signal_pending(current)) {
                                res = -ERESTARTSYS;
                                break;
                        }
                        spin_unlock_irq(&ctx->wqh.lock);
                        schedule();
                        spin_lock_irq(&ctx->wqh.lock);
                }
                __remove_wait_queue(&ctx->wqh, &wait);
                __set_current_state(TASK_RUNNING);
        }
        if (likely(res > 0)) {
                ctx->count += ucnt;
                current->in_eventfd = 1;
                if (waitqueue_active(&ctx->wqh))
                        wake_up_locked_poll(&ctx->wqh, EPOLLIN);
                current->in_eventfd = 0;
        }
        spin_unlock_irq(&ctx->wqh.lock);

        return res;
}

#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
        struct eventfd_ctx *ctx = f->private_data;

        spin_lock_irq(&ctx->wqh.lock);
        seq_printf(m, "eventfd-count: %16llx\n",
                   (unsigned long long)ctx->count);
        spin_unlock_irq(&ctx->wqh.lock);
        seq_printf(m, "eventfd-id: %d\n", ctx->id);
}
#endif

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo    = eventfd_show_fdinfo,
#endif
        .release        = eventfd_release,
        .poll           = eventfd_poll,
        .read_iter      = eventfd_read,
        .write          = eventfd_write,
        .llseek         = noop_llseek,
};

/**
 * eventfd_fget - Acquire a reference to an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointer:
 *
 * -EBADF    : Invalid @fd file descriptor.
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
        struct file *file;

        file = fget(fd);
        if (!file)
                return ERR_PTR(-EBADF);
        if (file->f_op != &eventfd_fops) {
                fput(file);
                return ERR_PTR(-EINVAL);
        }

        return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointers returned by the following functions:
 *
 * eventfd_fget
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
        struct eventfd_ctx *ctx;
        struct fd f = fdget(fd);
        if (!f.file)
                return ERR_PTR(-EBADF);
        ctx = eventfd_ctx_fileget(f.file);
        fdput(f);
        return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL   : The @file is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
        struct eventfd_ctx *ctx;

        if (file->f_op != &eventfd_fops)
                return ERR_PTR(-EINVAL);

        ctx = file->private_data;
        kref_get(&ctx->kref);
        return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
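
/*
 * Illustrative sketch (not part of this file): the usual lifecycle for a
 * kernel user that receives an eventfd from userspace as a plain fd.
 * 'efd' and 'my_ctx' are hypothetical.
 *
 *      struct eventfd_ctx *my_ctx;
 *
 *      my_ctx = eventfd_ctx_fdget(efd);        // takes its own reference
 *      if (IS_ERR(my_ctx))
 *              return PTR_ERR(my_ctx);
 *
 *      // ... later, possibly from atomic context ...
 *      eventfd_signal(my_ctx, 1);
 *
 *      // ... on teardown, drop the reference taken above ...
 *      eventfd_ctx_put(my_ctx);
 */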

static int do_eventfd(unsigned int count, int flags)
{
        struct eventfd_ctx *ctx;
        struct file *file;
        int fd;

        /* Check the EFD_* constants for consistency. */
        BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
        BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

        if (flags & ~EFD_FLAGS_SET)
                return -EINVAL;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        kref_init(&ctx->kref);
        init_waitqueue_head(&ctx->wqh);
        ctx->count = count;
        ctx->flags = flags;
        ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);

        flags &= EFD_SHARED_FCNTL_FLAGS;
        flags |= O_RDWR;
        fd = get_unused_fd_flags(flags);
        if (fd < 0)
                goto err;

        file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
        if (IS_ERR(file)) {
                put_unused_fd(fd);
                fd = PTR_ERR(file);
                goto err;
        }

        file->f_mode |= FMODE_NOWAIT;
        fd_install(fd, file);
        return fd;
err:
        eventfd_free_ctx(ctx);
        return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
        return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
        return do_eventfd(count, 0);
}