// SPDX-License-Identifier: GPL-2.0
/*
 * Code related to the io_uring_register() syscall
 *
 * Copyright (C) 2023 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/refcount.h>
#include <linux/bits.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring.h>
#include <linux/io_uring_types.h>

#include "io_uring.h"
#include "opdef.h"
#include "tctx.h"
#include "rsrc.h"
#include "sqpoll.h"
#include "register.h"
#include "cancel.h"
#include "kbuf.h"
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)
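/*
 * Register an eventfd to be signalled when completions are posted to the
 * CQ ring. With eventfd_async set, only completions that happen outside
 * the submitting task's context trigger a notification.
 */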
static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
			       unsigned int eventfd_async)
{
	struct io_ev_fd *ev_fd;
	__s32 __user *fds = arg;
	int fd;

	ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
					lockdep_is_held(&ctx->uring_lock));
	if (ev_fd)
		return -EBUSY;

	if (copy_from_user(&fd, fds, sizeof(*fds)))
		return -EFAULT;

	ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
	if (!ev_fd)
		return -ENOMEM;

	ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
	if (IS_ERR(ev_fd->cq_ev_fd)) {
		int ret = PTR_ERR(ev_fd->cq_ev_fd);

		kfree(ev_fd);
		return ret;
	}

	spin_lock(&ctx->completion_lock);
	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
	spin_unlock(&ctx->completion_lock);

	ev_fd->eventfd_async = eventfd_async;
	ctx->has_evfd = true;
	rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
	atomic_set(&ev_fd->refs, 1);
	atomic_set(&ev_fd->ops, 0);
	return 0;
}
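/*
 * Drop the ring's reference to a registered eventfd; the actual free is
 * deferred via RCU once the last reference goes away.
 */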
int io_eventfd_unregister(struct io_ring_ctx *ctx)
{
	struct io_ev_fd *ev_fd;

	ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
					lockdep_is_held(&ctx->uring_lock));
	if (ev_fd) {
		ctx->has_evfd = false;
		rcu_assign_pointer(ctx->io_ev_fd, NULL);
		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_FREE_BIT), &ev_fd->ops))
			call_rcu(&ev_fd->rcu, io_eventfd_ops);
		return 0;
	}

	return -ENXIO;
}
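/*
 * Fill in an io_uring_probe structure for userspace, reporting which
 * opcodes this kernel supports. The caller-provided buffer must be zeroed.
 */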
static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
			   unsigned nr_args)
{
	struct io_uring_probe *p;
	size_t size;
	int i, ret;

	size = struct_size(p, ops, nr_args);
	if (size == SIZE_MAX)
		return -EOVERFLOW;
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(p, arg, size))
		goto out;
	ret = -EINVAL;
	if (memchr_inv(p, 0, size))
		goto out;

	p->last_op = IORING_OP_LAST - 1;
	if (nr_args > IORING_OP_LAST)
		nr_args = IORING_OP_LAST;

	for (i = 0; i < nr_args; i++) {
		p->ops[i].op = i;
		if (!io_issue_defs[i].not_supported)
			p->ops[i].flags = IO_URING_OP_SUPPORTED;
	}
	p->ops_len = i;

	ret = 0;
	if (copy_to_user(arg, p, size))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}
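/* Remove a previously registered personality (set of credentials) by id. */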
int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
{
	const struct cred *creds;

	creds = xa_erase(&ctx->personalities, id);
	if (creds) {
		put_cred(creds);
		return 0;
	}

	return -EINVAL;
}
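/*
 * Stash the current task's credentials in the ring's personality xarray
 * and hand the allocated id back for later use in sqe->personality.
 */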
static int io_register_personality(struct io_ring_ctx *ctx)
{
	const struct cred *creds;
	u32 id;
	int ret;

	creds = get_current_cred();

	ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
			XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
	if (ret < 0) {
		put_cred(creds);
		return ret;
	}
	return id;
}
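/*
 * Install restrictions limiting which register opcodes, sqe opcodes and
 * sqe flags the ring will accept once enabled.
 */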
static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
					   void __user *arg, unsigned int nr_args)
{
	struct io_uring_restriction *res;
	size_t size;
	int i, ret;

	/* Restrictions allowed only if rings started disabled */
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	/* We allow only a single restrictions registration */
	if (ctx->restrictions.registered)
		return -EBUSY;

	if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
		return -EINVAL;

	size = array_size(nr_args, sizeof(*res));
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	res = memdup_user(arg, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = 0;

	for (i = 0; i < nr_args; i++) {
		switch (res[i].opcode) {
		case IORING_RESTRICTION_REGISTER_OP:
			if (res[i].register_op >= IORING_REGISTER_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].register_op,
				  ctx->restrictions.register_op);
			break;
		case IORING_RESTRICTION_SQE_OP:
			if (res[i].sqe_op >= IORING_OP_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
			break;
		case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
			ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
			break;
		case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
			ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}

out:
	/* Reset all restrictions if an error happened */
	if (ret != 0)
		memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
	else
		ctx->restrictions.registered = true;

	kfree(res);
	return ret;
}
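/*
 * Enable a ring that was created with IORING_SETUP_R_DISABLED, arming any
 * previously registered restrictions and waking a waiting SQPOLL thread.
 */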
static int io_register_enable_rings(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task) {
		WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
		/*
		 * Lazy activation attempts would fail if it was polled before
		 * submitter_task is set.
		 */
		if (wq_has_sleeper(&ctx->poll_wq))
			io_activate_pollwq(ctx);
	}

	if (ctx->restrictions.registered)
		ctx->restricted = 1;

	ctx->flags &= ~IORING_SETUP_R_DISABLED;
	if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	return 0;
}
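/*
 * Apply a CPU affinity mask to the io-wq workers backing this ring. For
 * SQPOLL rings the mask applies to the SQPOLL thread's io-wq instead,
 * which requires dropping uring_lock to honor the lock ordering.
 */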
static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
					 cpumask_var_t new_mask)
{
	int ret;

	if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
		ret = io_wq_cpu_affinity(current->io_uring, new_mask);
	} else {
		mutex_unlock(&ctx->uring_lock);
		ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
		mutex_lock(&ctx->uring_lock);
	}

	return ret;
}
static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
				       void __user *arg, unsigned len)
{
	cpumask_var_t new_mask;
	int ret;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(new_mask);
	if (len > cpumask_size())
		len = cpumask_size();

	if (in_compat_syscall())
		ret = compat_get_bitmap(cpumask_bits(new_mask),
					(const compat_ulong_t __user *)arg,
					len * 8 /* CHAR_BIT */);
	else
		ret = copy_from_user(new_mask, arg, len);

	if (ret) {
		free_cpumask_var(new_mask);
		return -EFAULT;
	}

	ret = __io_register_iowq_aff(ctx, new_mask);
	free_cpumask_var(new_mask);
	return ret;
}
static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
{
	return __io_register_iowq_aff(ctx, NULL);
}
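/*
 * Set upper bounds on the number of bounded and unbounded io-wq workers,
 * returning the previous limits through the same two-element array.
 */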
static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
					       void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_tctx_node *node;
	struct io_uring_task *tctx = NULL;
	struct io_sq_data *sqd = NULL;
	__u32 new_count[2];
	int i, ret;

	if (copy_from_user(new_count, arg, sizeof(new_count)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i] > INT_MAX)
			return -EINVAL;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		sqd = ctx->sq_data;
		if (sqd) {
			/*
			 * Observe the correct sqd->lock -> ctx->uring_lock
			 * ordering. Fine to drop uring_lock here, we hold
			 * a ref to the ctx.
			 */
			refcount_inc(&sqd->refs);
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&sqd->lock);
			mutex_lock(&ctx->uring_lock);
			if (sqd->thread)
				tctx = sqd->thread->io_uring;
		}
	} else {
		tctx = current->io_uring;
	}

	BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));

	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i])
			ctx->iowq_limits[i] = new_count[i];
	ctx->iowq_limits_set = true;

	if (tctx && tctx->io_wq) {
		ret = io_wq_max_workers(tctx->io_wq, new_count);
		if (ret)
			goto err;
	} else {
		memset(new_count, 0, sizeof(new_count));
	}

	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}

	if (copy_to_user(arg, new_count, sizeof(new_count)))
		return -EFAULT;

	/* that's it for SQPOLL, only the SQPOLL task creates requests */
	if (sqd)
		return 0;

	/* now propagate the restriction to all registered users */
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		if (WARN_ON_ONCE(!tctx->io_wq))
			continue;

		for (i = 0; i < ARRAY_SIZE(new_count); i++)
			new_count[i] = ctx->iowq_limits[i];
		/* ignore errors, it always returns zero anyway */
		(void)io_wq_max_workers(tctx->io_wq, new_count);
	}
	return 0;
err:
	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}
	return ret;
}
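/*
 * Dispatch a single register opcode with ctx->uring_lock held. io-wq
 * affinity updates may temporarily drop and reacquire the lock, hence
 * the __releases/__acquires annotations.
 */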
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We don't quiesce the refs for register anymore and so it can't be
	 * dying as we're holding a file ref here.
	 */
	if (WARN_ON_ONCE(percpu_ref_is_dying(&ctx->refs)))
		return -ENXIO;

	if (ctx->submitter_task && ctx->submitter_task != current)
		return -EEXIST;

	if (ctx->restricted) {
		opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
		if (!test_bit(opcode, ctx->restrictions.register_op))
			return -EACCES;
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = -EFAULT;
		if (!arg)
			break;
		ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffers_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = -EFAULT;
		if (!arg)
			break;
		ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_register_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg, 0);
		break;
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg, 1);
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_FILES2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_FILES_UPDATE2:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_BUFFERS2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_BUFFERS_UPDATE:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (!arg || !nr_args)
			break;
		ret = io_register_iowq_aff(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_unregister_iowq_aff(ctx);
		break;
	case IORING_REGISTER_IOWQ_MAX_WORKERS:
		ret = -EINVAL;
		if (!arg || nr_args != 2)
			break;
		ret = io_register_iowq_max_workers(ctx, arg);
		break;
	case IORING_REGISTER_RING_FDS:
		ret = io_ringfd_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_RING_FDS:
		ret = io_ringfd_unregister(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PBUF_RING:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_register_pbuf_ring(ctx, arg);
		break;
	case IORING_UNREGISTER_PBUF_RING:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_unregister_pbuf_ring(ctx, arg);
		break;
	case IORING_REGISTER_SYNC_CANCEL:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_sync_cancel(ctx, arg);
		break;
	case IORING_REGISTER_FILE_ALLOC_RANGE:
		ret = -EINVAL;
		if (!arg || nr_args)
			break;
		ret = io_register_file_alloc_range(ctx, arg);
		break;
	case IORING_REGISTER_PBUF_STATUS:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_register_pbuf_status(ctx, arg);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
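/*
 * Entry point for io_uring_register(2). Resolves the ring file either via
 * fget() or, with IORING_REGISTER_USE_REGISTERED_RING, from the task's
 * registered-ring table, then dispatches under uring_lock.
 */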
SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct file *file;
	bool use_registered_ring;

	use_registered_ring = !!(opcode & IORING_REGISTER_USE_REGISTERED_RING);
	opcode &= ~IORING_REGISTER_USE_REGISTERED_RING;

	if (opcode >= IORING_REGISTER_LAST)
		return -EINVAL;

	if (use_registered_ring) {
		/*
		 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we
		 * need only dereference our task private array to find it.
		 */
		struct io_uring_task *tctx = current->io_uring;

		if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
			return -EINVAL;
		fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
		file = tctx->registered_rings[fd];
		if (unlikely(!file))
			return -EBADF;
	} else {
		file = fget(fd);
		if (unlikely(!file))
			return -EBADF;
	}

	ret = -EOPNOTSUPP;
	if (!io_is_uring_fops(file))
		goto out_fput;

	ctx = file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
out_fput:
	if (!use_registered_ring)
		fput(file);
	return ret;
}