/*
 * io_uring/notif.c — zero-copy send notification allocation, registration
 * and completion handling.
 */
1#include <linux/kernel.h>
2#include <linux/errno.h>
3#include <linux/file.h>
4#include <linux/slab.h>
5#include <linux/net.h>
6#include <linux/io_uring.h>
7
8#include "io_uring.h"
9#include "notif.h"
68ef5578 10#include "rsrc.h"
eb42cebb 11
14b146b6 12static void __io_notif_complete_tw(struct io_kiocb *notif, bool *locked)
eb42cebb 13{
14b146b6 14 struct io_notif_data *nd = io_notif_to_data(notif);
eb42cebb
PB
15 struct io_ring_ctx *ctx = notif->ctx;
16
14b146b6
PB
17 if (nd->account_pages && ctx->user) {
18 __io_unaccount_mem(ctx->user, nd->account_pages);
19 nd->account_pages = 0;
e29e3bd4 20 }
14b146b6 21 io_req_task_complete(notif, locked);
eb42cebb
PB
22}
23
14b146b6
PB
24static inline void io_notif_complete(struct io_kiocb *notif)
25 __must_hold(&notif->ctx->uring_lock)
eb42cebb 26{
14b146b6 27 bool locked = true;
eb42cebb 28
14b146b6 29 __io_notif_complete_tw(notif, &locked);
eb42cebb
PB
30}
31
32static void io_uring_tx_zerocopy_callback(struct sk_buff *skb,
33 struct ubuf_info *uarg,
34 bool success)
35{
14b146b6
PB
36 struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
37 struct io_kiocb *notif = cmd_to_io_kiocb(nd);
eb4a299b 38
14b146b6
PB
39 if (refcount_dec_and_test(&uarg->refcnt)) {
40 notif->io_task_work.func = __io_notif_complete_tw;
41 io_req_task_work_add(notif);
eb4a299b
PB
42 }
43}
44
/*
 * Allocate and initialise a notification request bound to @slot.
 *
 * The notification is an internal io_kiocb (opcode NOP) whose CQE fields
 * are pre-filled from the slot: user_data comes from the slot's tag and
 * cqe.flags carries the slot's incrementing sequence number. Returns NULL
 * if the request cache cannot be refilled.
 */
struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
				struct io_notif_slot *slot)
	__must_hold(&ctx->uring_lock)
{
	struct io_kiocb *notif;
	struct io_notif_data *nd;

	if (unlikely(!io_alloc_req_refill(ctx)))
		return NULL;
	notif = io_alloc_req(ctx);
	notif->opcode = IORING_OP_NOP;
	notif->flags = 0;
	notif->file = NULL;
	notif->task = current;
	/* hold a task ref for the lifetime of the notification */
	io_get_task_refs(1);
	notif->rsrc_node = NULL;
	io_req_set_rsrc_node(notif, ctx, 0);
	/* CQE identity: slot tag as user_data, per-slot sequence in flags */
	notif->cqe.user_data = slot->tag;
	notif->cqe.flags = slot->seq++;
	notif->cqe.res = 0;

	nd = io_notif_to_data(notif);
	nd->account_pages = 0;
	nd->uarg.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
	nd->uarg.callback = io_uring_tx_zerocopy_callback;
	/* master ref owned by io_notif_slot, will be dropped on flush */
	refcount_set(&nd->uarg.refcnt, 1);
	return notif;
}
74
63809137 75void io_notif_slot_flush(struct io_notif_slot *slot)
2cacedc8 76 __must_hold(&slot->notif->ctx->uring_lock)
eb42cebb 77{
14b146b6
PB
78 struct io_kiocb *notif = slot->notif;
79 struct io_notif_data *nd = io_notif_to_data(notif);
eb42cebb
PB
80
81 slot->notif = NULL;
82
eb42cebb 83 /* drop slot's master ref */
53bdc88a
PB
84 if (refcount_dec_and_test(&nd->uarg.refcnt)) {
85 notif->io_task_work.func = __io_notif_complete_tw;
86 io_req_task_work_add(notif);
87 }
eb42cebb
PB
88}
89
90__cold int io_notif_unregister(struct io_ring_ctx *ctx)
91 __must_hold(&ctx->uring_lock)
92{
93 int i;
94
95 if (!ctx->notif_slots)
96 return -ENXIO;
97
98 for (i = 0; i < ctx->nr_notif_slots; i++) {
99 struct io_notif_slot *slot = &ctx->notif_slots[i];
14b146b6
PB
100 struct io_kiocb *notif = slot->notif;
101 struct io_notif_data *nd;
eb42cebb 102
14b146b6
PB
103 if (!notif)
104 continue;
da2634e8 105 nd = io_notif_to_data(notif);
14b146b6
PB
106 slot->notif = NULL;
107 if (!refcount_dec_and_test(&nd->uarg.refcnt))
e58d498e 108 continue;
14b146b6
PB
109 notif->io_task_work.func = __io_notif_complete_tw;
110 io_req_task_work_add(notif);
eb42cebb
PB
111 }
112
113 kvfree(ctx->notif_slots);
114 ctx->notif_slots = NULL;
115 ctx->nr_notif_slots = 0;
bc24d6bd
PB
116 return 0;
117}
118
119__cold int io_notif_register(struct io_ring_ctx *ctx,
120 void __user *arg, unsigned int size)
121 __must_hold(&ctx->uring_lock)
122{
123 struct io_uring_notification_slot __user *slots;
124 struct io_uring_notification_slot slot;
125 struct io_uring_notification_register reg;
126 unsigned i;
127
128 if (ctx->nr_notif_slots)
129 return -EBUSY;
130 if (size != sizeof(reg))
131 return -EINVAL;
132 if (copy_from_user(&reg, arg, sizeof(reg)))
133 return -EFAULT;
134 if (!reg.nr_slots || reg.nr_slots > IORING_MAX_NOTIF_SLOTS)
135 return -EINVAL;
136 if (reg.resv || reg.resv2 || reg.resv3)
137 return -EINVAL;
138
139 slots = u64_to_user_ptr(reg.data);
140 ctx->notif_slots = kvcalloc(reg.nr_slots, sizeof(ctx->notif_slots[0]),
141 GFP_KERNEL_ACCOUNT);
142 if (!ctx->notif_slots)
143 return -ENOMEM;
144
145 for (i = 0; i < reg.nr_slots; i++, ctx->nr_notif_slots++) {
146 struct io_notif_slot *notif_slot = &ctx->notif_slots[i];
147
148 if (copy_from_user(&slot, &slots[i], sizeof(slot))) {
149 io_notif_unregister(ctx);
150 return -EFAULT;
151 }
152 if (slot.resv[0] | slot.resv[1] | slot.resv[2]) {
153 io_notif_unregister(ctx);
154 return -EINVAL;
155 }
156 notif_slot->tag = slot.tag;
157 }
eb42cebb 158 return 0;
e58d498e 159}