/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS	MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS	(MAX_MSG_FRAGS + 1)

enum __sk_action {
        __SK_DROP = 0,
        __SK_PASS,
        __SK_REDIRECT,
        __SK_NONE,
};

struct sk_msg_sg {
        u32 start;
        u32 curr;
        u32 end;
        u32 size;
        u32 copybreak;
        DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
        /* The extra two elements:
         * 1) used for chaining the front and back sections when the list
         *    becomes partitioned (e.g. end < start). The crypto APIs require
         *    the chaining;
         * 2) to chain trailer SG entries after the message.
         */
        struct scatterlist data[MAX_MSG_FRAGS + 2];
};
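
/* Note on the fields above (an illustrative reading, not part of the original
 * header): start, curr and end index into sg.data[] as a ring of
 * NR_MSG_FRAG_IDS slots, so the live region may wrap around (end < start);
 * sk_msg_iter_dist() below computes its length in either case. Entries whose
 * bit is set in the copy bitmap are not exposed for direct data access (see
 * sk_msg_compute_data_pointers()) and must be copied first.
 */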
41 | ||
42 | /* UAPI in filter.c depends on struct sk_msg_sg being first element. */ | |
43 | struct sk_msg { | |
44 | struct sk_msg_sg sg; | |
45 | void *data; | |
46 | void *data_end; | |
47 | u32 apply_bytes; | |
48 | u32 cork_bytes; | |
49 | u32 flags; | |
50 | struct sk_buff *skb; | |
51 | struct sock *sk_redir; | |
52 | struct sock *sk; | |
53 | struct list_head list; | |
54 | }; | |
55 | ||
56 | struct sk_psock_progs { | |
57 | struct bpf_prog *msg_parser; | |
58 | struct bpf_prog *stream_parser; | |
59 | struct bpf_prog *stream_verdict; | |
60 | struct bpf_prog *skb_verdict; | |
61 | struct bpf_link *msg_parser_link; | |
62 | struct bpf_link *stream_parser_link; | |
63 | struct bpf_link *stream_verdict_link; | |
64 | struct bpf_link *skb_verdict_link; | |
65 | }; | |
66 | ||
67 | enum sk_psock_state_bits { | |
68 | SK_PSOCK_TX_ENABLED, | |
69 | SK_PSOCK_RX_STRP_ENABLED, | |
70 | }; | |
71 | ||
72 | struct sk_psock_link { | |
73 | struct list_head list; | |
74 | struct bpf_map *map; | |
75 | void *link_raw; | |
76 | }; | |
77 | ||
78 | struct sk_psock_work_state { | |
79 | u32 len; | |
80 | u32 off; | |
81 | }; | |
82 | ||
83 | struct sk_psock { | |
84 | struct sock *sk; | |
85 | struct sock *sk_redir; | |
86 | u32 apply_bytes; | |
87 | u32 cork_bytes; | |
88 | u32 eval; | |
89 | bool redir_ingress; /* undefined if sk_redir is null */ | |
90 | struct sk_msg *cork; | |
91 | struct sk_psock_progs progs; | |
92 | #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) | |
93 | struct strparser strp; | |
94 | u32 copied_seq; | |
95 | u32 ingress_bytes; | |
96 | #endif | |
97 | struct sk_buff_head ingress_skb; | |
98 | struct list_head ingress_msg; | |
99 | spinlock_t ingress_lock; | |
100 | unsigned long state; | |
101 | struct list_head link; | |
102 | spinlock_t link_lock; | |
103 | refcount_t refcnt; | |
104 | void (*saved_unhash)(struct sock *sk); | |
105 | void (*saved_destroy)(struct sock *sk); | |
106 | void (*saved_close)(struct sock *sk, long timeout); | |
107 | void (*saved_write_space)(struct sock *sk); | |
108 | void (*saved_data_ready)(struct sock *sk); | |
109 | /* psock_update_sk_prot may be called with restore=false many times | |
110 | * so the handler must be safe for this case. It will be called | |
111 | * exactly once with restore=true when the psock is being destroyed | |
112 | * and psock refcnt is zero, but before an RCU grace period. | |
113 | */ | |
114 | int (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock, | |
115 | bool restore); | |
116 | struct proto *sk_proto; | |
117 | struct mutex work_mutex; | |
118 | struct sk_psock_work_state work_state; | |
119 | struct delayed_work work; | |
120 | struct sock *sk_pair; | |
121 | struct rcu_work rwork; | |
122 | }; | |
123 | ||
124 | int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len, | |
125 | int elem_first_coalesce); | |
126 | int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src, | |
127 | u32 off, u32 len); | |
128 | void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len); | |
129 | int sk_msg_free(struct sock *sk, struct sk_msg *msg); | |
130 | int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg); | |
131 | void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes); | |
132 | void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg, | |
133 | u32 bytes); | |
134 | ||
135 | void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes); | |
136 | void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes); | |
137 | ||
138 | int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from, | |
139 | struct sk_msg *msg, u32 bytes); | |
140 | int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from, | |
141 | struct sk_msg *msg, u32 bytes); | |
142 | int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, | |
143 | int len, int flags); | |
144 | bool sk_msg_is_readable(struct sock *sk); | |
145 | ||
146 | static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes) | |
147 | { | |
148 | WARN_ON(i == msg->sg.end && bytes); | |
149 | } | |
150 | ||
151 | static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes) | |
152 | { | |
153 | if (psock->apply_bytes) { | |
154 | if (psock->apply_bytes < bytes) | |
155 | psock->apply_bytes = 0; | |
156 | else | |
157 | psock->apply_bytes -= bytes; | |
158 | } | |
159 | } | |
160 | ||
161 | static inline u32 sk_msg_iter_dist(u32 start, u32 end) | |
162 | { | |
163 | return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start); | |
164 | } | |
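
/* Worked example (illustrative): with NR_MSG_FRAG_IDS ring slots, a
 * non-wrapped region start = 3, end = 7 has distance 7 - 3 = 4, while a
 * wrapped region start = NR_MSG_FRAG_IDS - 2, end = 2 has distance
 * 2 + (NR_MSG_FRAG_IDS - (NR_MSG_FRAG_IDS - 2)) = 4 as well.
 */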
165 | ||
166 | #define sk_msg_iter_var_prev(var) \ | |
167 | do { \ | |
168 | if (var == 0) \ | |
169 | var = NR_MSG_FRAG_IDS - 1; \ | |
170 | else \ | |
171 | var--; \ | |
172 | } while (0) | |
173 | ||
174 | #define sk_msg_iter_var_next(var) \ | |
175 | do { \ | |
176 | var++; \ | |
177 | if (var == NR_MSG_FRAG_IDS) \ | |
178 | var = 0; \ | |
179 | } while (0) | |
180 | ||
181 | #define sk_msg_iter_prev(msg, which) \ | |
182 | sk_msg_iter_var_prev(msg->sg.which) | |
183 | ||
184 | #define sk_msg_iter_next(msg, which) \ | |
185 | sk_msg_iter_var_next(msg->sg.which) | |
186 | ||
187 | static inline void sk_msg_init(struct sk_msg *msg) | |
188 | { | |
189 | BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS); | |
190 | memset(msg, 0, sizeof(*msg)); | |
191 | sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS); | |
192 | } | |
193 | ||
194 | static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src, | |
195 | int which, u32 size) | |
196 | { | |
197 | dst->sg.data[which] = src->sg.data[which]; | |
198 | dst->sg.data[which].length = size; | |
199 | dst->sg.size += size; | |
200 | src->sg.size -= size; | |
201 | src->sg.data[which].length -= size; | |
202 | src->sg.data[which].offset += size; | |
203 | } | |
204 | ||
205 | static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src) | |
206 | { | |
207 | memcpy(dst, src, sizeof(*src)); | |
208 | sk_msg_init(src); | |
209 | } | |
210 | ||
211 | static inline bool sk_msg_full(const struct sk_msg *msg) | |
212 | { | |
213 | return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS; | |
214 | } | |
215 | ||
216 | static inline u32 sk_msg_elem_used(const struct sk_msg *msg) | |
217 | { | |
218 | return sk_msg_iter_dist(msg->sg.start, msg->sg.end); | |
219 | } | |
220 | ||
221 | static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which) | |
222 | { | |
223 | return &msg->sg.data[which]; | |
224 | } | |
225 | ||
226 | static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which) | |
227 | { | |
228 | return msg->sg.data[which]; | |
229 | } | |
230 | ||
231 | static inline struct page *sk_msg_page(struct sk_msg *msg, int which) | |
232 | { | |
233 | return sg_page(sk_msg_elem(msg, which)); | |
234 | } | |
235 | ||
236 | static inline bool sk_msg_to_ingress(const struct sk_msg *msg) | |
237 | { | |
238 | return msg->flags & BPF_F_INGRESS; | |
239 | } | |
240 | ||
241 | static inline void sk_msg_compute_data_pointers(struct sk_msg *msg) | |
242 | { | |
243 | struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start); | |
244 | ||
245 | if (test_bit(msg->sg.start, msg->sg.copy)) { | |
246 | msg->data = NULL; | |
247 | msg->data_end = NULL; | |
248 | } else { | |
249 | msg->data = sg_virt(sge); | |
250 | msg->data_end = msg->data + sge->length; | |
251 | } | |
252 | } | |
253 | ||
static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
                                   u32 len, u32 offset)
{
        struct scatterlist *sge;

        get_page(page);
        sge = sk_msg_elem(msg, msg->sg.end);
        sg_set_page(sge, page, len, offset);
        sg_unmark_end(sge);

        __set_bit(msg->sg.end, msg->sg.copy);
        msg->sg.size += len;
        sk_msg_iter_next(msg, end);
}

static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
        do {
                if (copy_state)
                        __set_bit(i, msg->sg.copy);
                else
                        __clear_bit(i, msg->sg.copy);
                sk_msg_iter_var_next(i);
                if (i == msg->sg.end)
                        break;
        } while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
        sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
        sk_msg_sg_copy(msg, start, false);
}

static inline struct sk_psock *sk_psock(const struct sock *sk)
{
        return __rcu_dereference_sk_user_data_with_flags(sk,
                                                         SK_USER_DATA_PSOCK);
}

static inline void sk_psock_set_state(struct sk_psock *psock,
                                      enum sk_psock_state_bits bit)
{
        set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
                                        enum sk_psock_state_bits bit)
{
        clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
                                       enum sk_psock_state_bits bit)
{
        return test_bit(bit, &psock->state);
}

static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
        sk_drops_add(sk, skb);
        kfree_skb(skb);
}

static inline bool sk_psock_queue_msg(struct sk_psock *psock,
                                      struct sk_msg *msg)
{
        bool ret;

        spin_lock_bh(&psock->ingress_lock);
        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
                list_add_tail(&msg->list, &psock->ingress_msg);
                ret = true;
        } else {
                sk_msg_free(psock->sk, msg);
                kfree(msg);
                ret = false;
        }
        spin_unlock_bh(&psock->ingress_lock);
        return ret;
}

static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
{
        struct sk_msg *msg;

        spin_lock_bh(&psock->ingress_lock);
        msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
        if (msg)
                list_del(&msg->list);
        spin_unlock_bh(&psock->ingress_lock);
        return msg;
}

static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
        struct sk_msg *msg;

        spin_lock_bh(&psock->ingress_lock);
        msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
        spin_unlock_bh(&psock->ingress_lock);
        return msg;
}

static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
                                               struct sk_msg *msg)
{
        struct sk_msg *ret;

        spin_lock_bh(&psock->ingress_lock);
        if (list_is_last(&msg->list, &psock->ingress_msg))
                ret = NULL;
        else
                ret = list_next_entry(msg, list);
        spin_unlock_bh(&psock->ingress_lock);
        return ret;
}

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
        return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void kfree_sk_msg(struct sk_msg *msg)
{
        if (msg->skb)
                consume_skb(msg->skb);
        kfree(msg);
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
        struct sock *sk = psock->sk;

        sk->sk_err = err;
        sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock);

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
#else
static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
        return -EOPNOTSUPP;
}

static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
}

static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
}
#endif

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
                         struct sk_msg *msg);

/*
 * This specialized allocator has to be a macro for its allocations to be
 * accounted separately (to have a separate alloc_tag). The typecast is
 * intentional to enforce type safety.
 */
#define sk_psock_init_link()						\
        ((struct sk_psock_link *)kzalloc(sizeof(struct sk_psock_link),	\
                                         GFP_ATOMIC | __GFP_NOWARN))

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
        kfree(link);
}
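
/* A minimal usage sketch (assumption, not part of this header's API; the
 * function name is purely illustrative): allocate a link with the
 * accounting-aware macro and pair it with sk_psock_free_link() on release.
 */
static inline struct sk_psock_link *sk_psock_link_alloc_example(void)
{
        struct sk_psock_link *link = sk_psock_init_link();

        if (!link)
                return NULL;	/* atomic kzalloc() failed */
        INIT_LIST_HEAD(&link->list);
        return link;		/* caller releases with sk_psock_free_link() */
}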
437 | ||
438 | struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock); | |
439 | ||
440 | static inline void sk_psock_cork_free(struct sk_psock *psock) | |
441 | { | |
442 | if (psock->cork) { | |
443 | sk_msg_free(psock->sk, psock->cork); | |
444 | kfree(psock->cork); | |
445 | psock->cork = NULL; | |
446 | } | |
447 | } | |
448 | ||
449 | static inline void sk_psock_restore_proto(struct sock *sk, | |
450 | struct sk_psock *psock) | |
451 | { | |
452 | if (psock->psock_update_sk_prot) | |
453 | psock->psock_update_sk_prot(sk, psock, true); | |
454 | } | |
455 | ||
456 | static inline struct sk_psock *sk_psock_get(struct sock *sk) | |
457 | { | |
458 | struct sk_psock *psock; | |
459 | ||
460 | rcu_read_lock(); | |
461 | psock = sk_psock(sk); | |
462 | if (psock && !refcount_inc_not_zero(&psock->refcnt)) | |
463 | psock = NULL; | |
464 | rcu_read_unlock(); | |
465 | return psock; | |
466 | } | |
467 | ||
468 | void sk_psock_drop(struct sock *sk, struct sk_psock *psock); | |
469 | ||
470 | static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock) | |
471 | { | |
472 | if (refcount_dec_and_test(&psock->refcnt)) | |
473 | sk_psock_drop(sk, psock); | |
474 | } | |
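
/* A minimal usage sketch (assumption, for illustration only; the helper name
 * is hypothetical): take a reference with sk_psock_get(), inspect the psock
 * while the reference is held, then drop it with sk_psock_put().
 */
static inline bool sk_psock_tx_enabled_example(struct sock *sk)
{
        struct sk_psock *psock = sk_psock_get(sk);
        bool enabled;

        if (!psock)
                return false;	/* no psock attached, or refcnt already zero */
        enabled = sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED);
        sk_psock_put(sk, psock);
        return enabled;
}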
475 | ||
476 | static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock) | |
477 | { | |
478 | read_lock_bh(&sk->sk_callback_lock); | |
479 | if (psock->saved_data_ready) | |
480 | psock->saved_data_ready(sk); | |
481 | else | |
482 | sk->sk_data_ready(sk); | |
483 | read_unlock_bh(&sk->sk_callback_lock); | |
484 | } | |
485 | ||
486 | static inline void psock_set_prog(struct bpf_prog **pprog, | |
487 | struct bpf_prog *prog) | |
488 | { | |
489 | prog = xchg(pprog, prog); | |
490 | if (prog) | |
491 | bpf_prog_put(prog); | |
492 | } | |
493 | ||
494 | static inline int psock_replace_prog(struct bpf_prog **pprog, | |
495 | struct bpf_prog *prog, | |
496 | struct bpf_prog *old) | |
497 | { | |
498 | if (cmpxchg(pprog, old, prog) != old) | |
499 | return -ENOENT; | |
500 | ||
501 | if (old) | |
502 | bpf_prog_put(old); | |
503 | ||
504 | return 0; | |
505 | } | |
506 | ||
507 | static inline void psock_progs_drop(struct sk_psock_progs *progs) | |
508 | { | |
509 | psock_set_prog(&progs->msg_parser, NULL); | |
510 | psock_set_prog(&progs->stream_parser, NULL); | |
511 | psock_set_prog(&progs->stream_verdict, NULL); | |
512 | psock_set_prog(&progs->skb_verdict, NULL); | |
513 | } | |
514 | ||
515 | int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb); | |
516 | ||
517 | static inline bool sk_psock_strp_enabled(struct sk_psock *psock) | |
518 | { | |
519 | if (!psock) | |
520 | return false; | |
521 | return !!psock->saved_data_ready; | |
522 | } | |
523 | ||
524 | #if IS_ENABLED(CONFIG_NET_SOCK_MSG) | |
525 | ||
526 | #define BPF_F_STRPARSER (1UL << 1) | |
527 | ||
528 | /* We only have two bits so far. */ | |
529 | #define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER) | |
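
/* Note (descriptive, derived from the helpers below): skb->_sk_redir stores a
 * struct sock pointer with the two low-order bits reused for BPF_F_INGRESS
 * and BPF_F_STRPARSER; BPF_F_PTR_MASK strips those flag bits to recover the
 * pointer.
 */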
530 | ||
531 | static inline bool skb_bpf_strparser(const struct sk_buff *skb) | |
532 | { | |
533 | unsigned long sk_redir = skb->_sk_redir; | |
534 | ||
535 | return sk_redir & BPF_F_STRPARSER; | |
536 | } | |
537 | ||
538 | static inline void skb_bpf_set_strparser(struct sk_buff *skb) | |
539 | { | |
540 | skb->_sk_redir |= BPF_F_STRPARSER; | |
541 | } | |
542 | ||
543 | static inline bool skb_bpf_ingress(const struct sk_buff *skb) | |
544 | { | |
545 | unsigned long sk_redir = skb->_sk_redir; | |
546 | ||
547 | return sk_redir & BPF_F_INGRESS; | |
548 | } | |
549 | ||
550 | static inline void skb_bpf_set_ingress(struct sk_buff *skb) | |
551 | { | |
552 | skb->_sk_redir |= BPF_F_INGRESS; | |
553 | } | |
554 | ||
555 | static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir, | |
556 | bool ingress) | |
557 | { | |
558 | skb->_sk_redir = (unsigned long)sk_redir; | |
559 | if (ingress) | |
560 | skb->_sk_redir |= BPF_F_INGRESS; | |
561 | } | |
562 | ||
563 | static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb) | |
564 | { | |
565 | unsigned long sk_redir = skb->_sk_redir; | |
566 | ||
567 | return (struct sock *)(sk_redir & BPF_F_PTR_MASK); | |
568 | } | |
569 | ||
570 | static inline void skb_bpf_redirect_clear(struct sk_buff *skb) | |
571 | { | |
572 | skb->_sk_redir = 0; | |
573 | } | |
574 | #endif /* CONFIG_NET_SOCK_MSG */ | |
575 | #endif /* _LINUX_SKMSG_H */ |